ZTE's code, first commit

Change-Id: I9a04da59e459a9bc0d67f101f700d9d7dc8d681b
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/Makefile b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/Makefile
new file mode 100644
index 0000000..e2516ba
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/Makefile
@@ -0,0 +1,33 @@
+#/*******************************************************************
+# * This file is part of the Emulex Linux Device Driver for         *
+# * Fibre Channel Host Bus Adapters.                                *
+# * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+# * EMULEX and SLI are trademarks of Emulex.                        *
+# * www.emulex.com                                                  *
+# *                                                                 *
+# * This program is free software; you can redistribute it and/or   *
+# * modify it under the terms of version 2 of the GNU General       *
+# * Public License as published by the Free Software Foundation.    *
+# * This program is distributed in the hope that it will be useful. *
+# * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+# * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+# * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+# * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+# * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+# * more details, a copy of which can be found in the file COPYING  *
+# * included with this package.                                     *
+# *******************************************************************/
+######################################################################
+
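+# GCOV and WARNINGS_BECOME_ERRORS are typically passed on the make command
+# line; with GCOV=y the flags below expand into ccflags-y and build the
+# driver with gcc coverage instrumentation and optimization disabled.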
+ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
+ccflags-$(GCOV) += -O0
+
+ifdef WARNINGS_BECOME_ERRORS
+ccflags-y += -Werror
+endif
+
+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o	\
+	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+	lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc.h
new file mode 100644
index 0000000..3a1ffdd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc.h
@@ -0,0 +1,1004 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <scsi/scsi_host.h>
+
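+/*
+ * If generic debugfs support is enabled in the kernel, turn the lpfc
+ * debugfs code on as well, even when CONFIG_SCSI_LPFC_DEBUG_FS was not
+ * selected in the kernel configuration.
+ */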
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
+struct lpfc_sli2_slim;
+
+#define LPFC_PCI_DEV_LP		0x1
+#define LPFC_PCI_DEV_OC		0x2
+
+#define LPFC_SLI_REV2		2
+#define LPFC_SLI_REV3		3
+#define LPFC_SLI_REV4		4
+
+#define LPFC_MAX_TARGET		4096	/* max number of targets supported */
+#define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
+					   requests */
+#define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
+					   the NameServer  before giving up. */
+#define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
+#define LPFC_DEFAULT_SG_SEG_CNT 64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo; needs nearly twice as many for
+		firmware downloads using bsg */
+#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+#define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
+#define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
+#define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
+#define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
+#define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN		100	/* vport symbolic name length */
+#define LPFC_TGTQ_INTERVAL	40000	/* Min amount of time between tgt
+					   queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH	10
+#define LPFC_MAX_TGT_QDEPTH	0xFFFF
+
+#define  LPFC_MAX_BUCKET_COUNT 20	/* Maximum no. of buckets for stat data
+					   collection. */
+/*
+ * The following time intervals are used for adjusting SCSI device
+ * queue depths when there is a driver resource error or a
+ * firmware resource error.
+ */
+#define QUEUE_RAMP_DOWN_INTERVAL	(1 * HZ)   /* 1 Second */
+#define QUEUE_RAMP_UP_INTERVAL		(300 * HZ) /* 5 minutes */
+
+/* Number of exchanges reserved for discovery to complete */
+#define LPFC_DISC_IOCB_BUFF_COUNT 20
+
+#define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
+#define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout  in seconds. */
+
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
+
+/* Define macros for 64 bit support */
+#define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
+#define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
+#define getPaddr(high, low)  ((dma_addr_t)( \
+			     (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
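+/*
+ * Illustrative use (not from the original source): a 64-bit DMA address
+ * is split into two 32-bit words for the adapter and rebuilt later:
+ *
+ *	addr_lo  = putPaddrLow(physaddr);
+ *	addr_hi  = putPaddrHigh(physaddr);
+ *	physaddr = getPaddr(addr_hi, addr_lo);
+ */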
+/* Provide maximum configuration definitions. */
+#define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
+#define FC_MAX_ADPTMSG		64
+
+#define MAX_HBAEVT	32
+
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS	2
+
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY		(1<<0)
+
+enum lpfc_polling_flags {
+	ENABLE_FCP_RING_POLLING = 0x1,
+	DISABLE_FCP_RING_INT    = 0x2
+};
+
+/* Provide DMA memory definitions the driver uses per port instance. */
+struct lpfc_dmabuf {
+	struct list_head list;
+	void *virt;		/* virtual address ptr */
+	dma_addr_t phys;	/* mapped address */
+	uint32_t   buffer_tag;	/* used for tagged queue ring */
+};
+
+struct lpfc_dma_pool {
+	struct lpfc_dmabuf   *elements;
+	uint32_t    max_count;
+	uint32_t    current_count;
+};
+
+struct hbq_dmabuf {
+	struct lpfc_dmabuf hbuf;
+	struct lpfc_dmabuf dbuf;
+	uint32_t size;
+	uint32_t tag;
+	struct lpfc_cq_event cq_event;
+	unsigned long time_stamp;
+};
+
+/* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
+#define MEM_PRI		0x100
+
+
+/****************************************************************************/
+/*      Device VPD save area                                                */
+/****************************************************************************/
+typedef struct lpfc_vpd {
+	uint32_t status;	/* vpd status value */
+	uint32_t length;	/* number of bytes actually returned */
+	struct {
+		uint32_t rsvd1;	/* Revision numbers */
+		uint32_t biuRev;
+		uint32_t smRev;
+		uint32_t smFwRev;
+		uint32_t endecRev;
+		uint16_t rBit;
+		uint8_t fcphHigh;
+		uint8_t fcphLow;
+		uint8_t feaLevelHigh;
+		uint8_t feaLevelLow;
+		uint32_t postKernRev;
+		uint32_t opFwRev;
+		uint8_t opFwName[16];
+		uint32_t sli1FwRev;
+		uint8_t sli1FwName[16];
+		uint32_t sli2FwRev;
+		uint8_t sli2FwName[16];
+	} rev;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint32_t rsvd3  :19;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
+		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
+		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
+		uint32_t chbs   : 1;  /* Configure Host Backing store         */
+		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
+		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
+		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
+		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
+#else	/*  __LITTLE_ENDIAN */
+		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
+		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
+		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
+		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
+		uint32_t chbs   : 1;  /* Configure Host Backing store         */
+		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
+		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
+		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
+		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
+		uint32_t rsvd2	: 3;  /* Reserved                             */
+		uint32_t cdss	: 1;  /* Configure Data Security SLI          */
+		uint32_t rsvd3  :19;  /* Reserved                             */
+#endif
+	} sli3Feat;
+} lpfc_vpd_t;
+
+struct lpfc_scsi_buf;
+
+
+/*
+ * lpfc stat counters
+ */
+struct lpfc_stats {
+	/* Statistics for ELS commands */
+	uint32_t elsLogiCol;
+	uint32_t elsRetryExceeded;
+	uint32_t elsXmitRetry;
+	uint32_t elsDelayRetry;
+	uint32_t elsRcvDrop;
+	uint32_t elsRcvFrame;
+	uint32_t elsRcvRSCN;
+	uint32_t elsRcvRNID;
+	uint32_t elsRcvFARP;
+	uint32_t elsRcvFARPR;
+	uint32_t elsRcvFLOGI;
+	uint32_t elsRcvPLOGI;
+	uint32_t elsRcvADISC;
+	uint32_t elsRcvPDISC;
+	uint32_t elsRcvFAN;
+	uint32_t elsRcvLOGO;
+	uint32_t elsRcvPRLO;
+	uint32_t elsRcvPRLI;
+	uint32_t elsRcvLIRR;
+	uint32_t elsRcvRLS;
+	uint32_t elsRcvRPS;
+	uint32_t elsRcvRPL;
+	uint32_t elsRcvRRQ;
+	uint32_t elsRcvRTV;
+	uint32_t elsRcvECHO;
+	uint32_t elsXmitFLOGI;
+	uint32_t elsXmitFDISC;
+	uint32_t elsXmitPLOGI;
+	uint32_t elsXmitPRLI;
+	uint32_t elsXmitADISC;
+	uint32_t elsXmitLOGO;
+	uint32_t elsXmitSCR;
+	uint32_t elsXmitRNID;
+	uint32_t elsXmitFARP;
+	uint32_t elsXmitFARPR;
+	uint32_t elsXmitACC;
+	uint32_t elsXmitLSRJT;
+
+	uint32_t frameRcvBcast;
+	uint32_t frameRcvMulti;
+	uint32_t strayXmitCmpl;
+	uint32_t frameXmitDelay;
+	uint32_t xriCmdCmpl;
+	uint32_t xriStatErr;
+	uint32_t LinkUp;
+	uint32_t LinkDown;
+	uint32_t LinkMultiEvent;
+	uint32_t NoRcvBuf;
+	uint32_t fcpCmd;
+	uint32_t fcpCmpl;
+	uint32_t fcpRspErr;
+	uint32_t fcpRemoteStop;
+	uint32_t fcpPortRjt;
+	uint32_t fcpPortBusy;
+	uint32_t fcpError;
+	uint32_t fcpLocalErr;
+};
+
+struct lpfc_hba;
+
+
+enum discovery_state {
+	LPFC_VPORT_UNKNOWN     =  0,    /* vport state is unknown */
+	LPFC_VPORT_FAILED      =  1,    /* vport has failed */
+	LPFC_LOCAL_CFG_LINK    =  6,    /* local NPORT Id configured */
+	LPFC_FLOGI             =  7,    /* FLOGI sent to Fabric */
+	LPFC_FDISC             =  8,    /* FDISC sent for vport */
+	LPFC_FABRIC_CFG_LINK   =  9,    /* Fabric assigned NPORT Id
+				         * configured */
+	LPFC_NS_REG            =  10,   /* Register with NameServer */
+	LPFC_NS_QRY            =  11,   /* Query NameServer for NPort ID list */
+	LPFC_BUILD_DISC_LIST   =  12,   /* Build ADISC and PLOGI lists for
+				         * device authentication / discovery */
+	LPFC_DISC_AUTH         =  13,   /* Processing ADISC list */
+	LPFC_VPORT_READY       =  32,
+};
+
+enum hba_state {
+	LPFC_LINK_UNKNOWN    =   0,   /* HBA state is unknown */
+	LPFC_WARM_START      =   1,   /* HBA state after selective reset */
+	LPFC_INIT_START      =   2,   /* Initial state after board reset */
+	LPFC_INIT_MBX_CMDS   =   3,   /* Initialize HBA with mbox commands */
+	LPFC_LINK_DOWN       =   4,   /* HBA initialized, link is down */
+	LPFC_LINK_UP         =   5,   /* Link is up  - issue READ_LA */
+	LPFC_CLEAR_LA        =   6,   /* authentication cmplt - issue
+				       * CLEAR_LA */
+	LPFC_HBA_READY       =  32,
+	LPFC_HBA_ERROR       =  -1
+};
+
+struct lpfc_vport {
+	struct lpfc_hba *phba;
+	struct list_head listentry;
+	uint8_t port_type;
+#define LPFC_PHYSICAL_PORT 1
+#define LPFC_NPIV_PORT  2
+#define LPFC_FABRIC_PORT 3
+	enum discovery_state port_state;
+
+	uint16_t vpi;
+	uint16_t vfi;
+	uint8_t vpi_state;
+#define LPFC_VPI_REGISTERED	0x1
+
+	uint32_t fc_flag;	/* FC flags */
+/* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PT2PT, FC_PUBLIC_LOOP)
+ */
+#define FC_PT2PT                0x1	 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI          0x2	 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO             0x4	 /* Discovery timer running */
+#define FC_PUBLIC_LOOP          0x8	 /* Public loop */
+#define FC_LBIT                 0x10	 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE            0x20	 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE             0x40	 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE         0x80	 /* Interface is offline for diag */
+#define FC_FABRIC               0x100	 /* We are fabric attached */
+#define FC_VPORT_LOGO_RCVD      0x200    /* LOGO received on vport */
+#define FC_RSCN_DISCOVERY       0x400	 /* Auth all devices after RSCN */
+#define FC_LOGO_RCVD_DID_CHNG   0x800    /* FDISC on phys port detect DID chng*/
+#define FC_SCSI_SCAN_TMO        0x4000	 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY      0x8000	 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE         0x10000	 /* NPort discovery active */
+#define FC_BYPASSED_MODE        0x20000	 /* NPort is in bypassed mode */
+#define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
+#define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
+#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD	0x400000 /* VLink failed due to CVL	 */
+#define FC_VFI_REGISTERED	0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED	0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED		0x2000000/* Delay NPort discovery */
+
+	uint32_t ct_flags;
+#define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
+#define FC_CT_RNN_ID		0x2	 /* RNN_ID accepted by switch */
+#define FC_CT_RSNN_NN		0x4	 /* RSNN_NN accepted by switch */
+#define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
+#define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */
+
+	struct list_head fc_nodes;
+
+	/* Keep counters for the number of entries in each list. */
+	uint16_t fc_plogi_cnt;
+	uint16_t fc_adisc_cnt;
+	uint16_t fc_reglogin_cnt;
+	uint16_t fc_prli_cnt;
+	uint16_t fc_unmap_cnt;
+	uint16_t fc_map_cnt;
+	uint16_t fc_npr_cnt;
+	uint16_t fc_unused_cnt;
+	struct serv_parm fc_sparam;	/* buffer for our service parameters */
+
+	uint32_t fc_myDID;	/* fibre channel S_ID */
+	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
+	struct lpfc_name fabric_portname;
+	struct lpfc_name fabric_nodename;
+
+	int32_t stopped;   /* HBA has not been restarted since last ERATT */
+	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
+
+	uint32_t num_disc_nodes;	/* in addition to hba_state */
+
+	uint32_t fc_nlp_cnt;	/* outstanding NODELIST requests */
+	uint32_t fc_rscn_id_cnt;	/* count of RSCNs payloads in list */
+	uint32_t fc_rscn_flush;		/* flag use of fc_rscn_id_list */
+	struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+	struct lpfc_name fc_nodename;	/* fc nodename */
+	struct lpfc_name fc_portname;	/* fc portname */
+
+	struct lpfc_work_evt disc_timeout_evt;
+
+	struct timer_list fc_disctmo;	/* Discovery rescue timer */
+	uint8_t fc_ns_retry;	/* retries for fabric nameserver */
+	uint32_t fc_prli_sent;	/* cntr for outstanding PRLIs */
+
+	spinlock_t work_port_lock;
+	uint32_t work_port_events; /* Timeout to be handled  */
+#define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
+#define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
+#define WORKER_FDMI_TMO                0x4	/* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */
+
+#define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
+#define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
+#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
+#define WORKER_RAMP_DOWN_QUEUE         0x800	/* hba: Decrease Q depth */
+#define WORKER_RAMP_UP_QUEUE           0x1000	/* hba: Increase Q depth */
+#define WORKER_SERVICE_TXQ             0x2000	/* hba: IOCBs on the txq */
+
+	struct timer_list fc_fdmitmo;
+	struct timer_list els_tmofunc;
+	struct timer_list delayed_disc_tmo;
+
+	int unreg_vpi_cmpl;
+
+	uint8_t load_flag;
+#define FC_LOADING		0x1	/* HBA in process of loading drvr */
+#define FC_UNLOADING		0x2	/* HBA in process of unloading drvr */
+	/* Vport Config Parameters */
+	uint32_t cfg_scan_down;
+	uint32_t cfg_lun_queue_depth;
+	uint32_t cfg_nodev_tmo;
+	uint32_t cfg_devloss_tmo;
+	uint32_t cfg_restrict_login;
+	uint32_t cfg_peer_port_login;
+	uint32_t cfg_fcp_class;
+	uint32_t cfg_use_adisc;
+	uint32_t cfg_fdmi_on;
+	uint32_t cfg_discovery_threads;
+	uint32_t cfg_log_verbose;
+	uint32_t cfg_max_luns;
+	uint32_t cfg_enable_da_id;
+	uint32_t cfg_max_scsicmpl_time;
+	uint32_t cfg_tgt_queue_depth;
+
+	uint32_t dev_loss_tmo_changed;
+
+	struct fc_vport *fc_vport;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct dentry *debug_disc_trc;
+	struct dentry *debug_nodelist;
+	struct dentry *vport_debugfs_root;
+	struct lpfc_debugfs_trc *disc_trc;
+	atomic_t disc_trc_cnt;
+#endif
+	uint8_t stat_data_enabled;
+	uint8_t stat_data_blocked;
+	struct list_head rcv_buffer_list;
+	unsigned long rcv_buffer_time_stamp;
+	uint32_t vport_flag;
+#define STATIC_VPORT	1
+};
+
+struct hbq_s {
+	uint16_t entry_count;	  /* Current number of HBQ slots */
+	uint16_t buffer_count;	  /* Current number of buffers posted */
+	uint32_t next_hbqPutIdx;  /* Index to next HBQ slot to use */
+	uint32_t hbqPutIdx;	  /* HBQ slot to use */
+	uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
+	void    *hbq_virt;	  /* Virtual ptr to this hbq */
+	struct list_head hbq_buffer_list;  /* buffers assigned to this HBQ */
+				  /* Callback for HBQ buffer allocation */
+	struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
+				  /* Callback for HBQ buffer free */
+	void               (*hbq_free_buffer) (struct lpfc_hba *,
+					       struct hbq_dmabuf *);
+};
+
+#define LPFC_MAX_HBQS  4
+/* this matches the position in the lpfc_hbq_defs array */
+#define LPFC_ELS_HBQ	0
+#define LPFC_EXTRA_HBQ	1
+
+enum hba_temp_state {
+	HBA_NORMAL_TEMP,
+	HBA_OVER_TEMP
+};
+
+enum intr_type_t {
+	NONE = 0,
+	INTx,
+	MSI,
+	MSIX,
+};
+
+struct unsol_rcv_ct_ctx {
+	uint32_t ctxt_id;
+	uint32_t SID;
+	uint32_t flags;
+#define UNSOL_VALID	0x00000001
+	uint16_t oxid;
+	uint16_t rxid;
+};
+
+#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
+#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
+#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
+#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
+#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
+#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
+#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_16G
+#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+				     (1 << LPFC_USER_LINK_SPEED_10G) | \
+				     (1 << LPFC_USER_LINK_SPEED_8G) | \
+				     (1 << LPFC_USER_LINK_SPEED_4G) | \
+				     (1 << LPFC_USER_LINK_SPEED_2G) | \
+				     (1 << LPFC_USER_LINK_SPEED_1G) | \
+				     (1 << LPFC_USER_LINK_SPEED_AUTO))
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
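+/*
+ * Illustrative check (not from the original source): a user-requested link
+ * speed can be validated against the bitmap above before it is accepted:
+ *
+ *	if (val > LPFC_USER_LINK_SPEED_MAX ||
+ *	    !(LPFC_USER_LINK_SPEED_BITMAP & (1 << val)))
+ *		return -EINVAL;
+ */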
+
+enum nemb_type {
+	nemb_mse = 1,
+	nemb_hbd
+};
+
+enum mbox_type {
+	mbox_rd = 1,
+	mbox_wr
+};
+
+enum dma_type {
+	dma_mbox = 1,
+	dma_ebuf
+};
+
+enum sta_type {
+	sta_pre_addr = 1,
+	sta_pos_addr
+};
+
+struct lpfc_mbox_ext_buf_ctx {
+	uint32_t state;
+#define LPFC_BSG_MBOX_IDLE		0
+#define LPFC_BSG_MBOX_HOST              1
+#define LPFC_BSG_MBOX_PORT		2
+#define LPFC_BSG_MBOX_DONE		3
+#define LPFC_BSG_MBOX_ABTS		4
+	enum nemb_type nembType;
+	enum mbox_type mboxType;
+	uint32_t numBuf;
+	uint32_t mbxTag;
+	uint32_t seqNum;
+	struct lpfc_dmabuf *mbx_dmabuf;
+	struct list_head ext_dmabuf_list;
+};
+
+struct lpfc_hba {
+	/* SCSI interface function jump table entries */
+	int (*lpfc_new_scsi_buf)
+		(struct lpfc_vport *, int);
+	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_nodelist *);
+	int (*lpfc_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_scsi_unprep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_release_scsi_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	void (*lpfc_rampdown_queue_depth)
+		(struct lpfc_hba *);
+	void (*lpfc_scsi_prep_cmnd)
+		(struct lpfc_vport *, struct lpfc_scsi_buf *,
+		 struct lpfc_nodelist *);
+
+	/* IOCB interface function jump table entries */
+	int (*__lpfc_sli_issue_iocb)
+		(struct lpfc_hba *, uint32_t,
+		 struct lpfc_iocbq *, uint32_t);
+	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+			 struct lpfc_iocbq *);
+	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+	IOCB_t * (*lpfc_get_iocb_from_iocbq)
+		(struct lpfc_iocbq *);
+	void (*lpfc_scsi_cmd_iocb_cmpl)
+		(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+	/* MBOX interface function jump table entries */
+	int (*lpfc_sli_issue_mbox)
+		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+	/* Slow-path IOCB process function jump table entries */
+	void (*lpfc_sli_handle_slow_ring_event)
+		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		 uint32_t mask);
+
+	/* INIT device interface function jump table entries */
+	int (*lpfc_sli_hbq_to_firmware)
+		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+	int (*lpfc_sli_brdrestart)
+		(struct lpfc_hba *);
+	int (*lpfc_sli_brdready)
+		(struct lpfc_hba *, uint32_t);
+	void (*lpfc_handle_eratt)
+		(struct lpfc_hba *);
+	void (*lpfc_stop_port)
+		(struct lpfc_hba *);
+	int (*lpfc_hba_init_link)
+		(struct lpfc_hba *, uint32_t);
+	int (*lpfc_hba_down_link)
+		(struct lpfc_hba *, uint32_t);
+	int (*lpfc_selective_reset)
+		(struct lpfc_hba *);
+
+	int (*lpfc_bg_scsi_prep_dma_buf)
+		(struct lpfc_hba *, struct lpfc_scsi_buf *);
+	/* Add new entries here */
+
+	/* SLI4 specific HBA data structure */
+	struct lpfc_sli4_hba sli4_hba;
+
+	struct lpfc_sli sli;
+	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
+	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
+#define LPFC_SLI3_HBQ_ENABLED		0x01
+#define LPFC_SLI3_NPIV_ENABLED		0x02
+#define LPFC_SLI3_VPORT_TEARDOWN	0x04
+#define LPFC_SLI3_CRP_ENABLED		0x08
+#define LPFC_SLI3_BG_ENABLED		0x20
+#define LPFC_SLI3_DSS_ENABLED		0x40
+#define LPFC_SLI4_PERFH_ENABLED		0x80
+#define LPFC_SLI4_PHWQ_ENABLED		0x100
+	uint32_t iocb_cmd_size;
+	uint32_t iocb_rsp_size;
+
+	enum hba_state link_state;
+	uint32_t link_flag;	/* link state flags */
+#define LS_LOOPBACK_MODE      0x1	/* NPort is in Loopback mode */
+					/* This flag is set while issuing */
+					/* INIT_LINK mailbox command */
+#define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
+#define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
+
+	uint32_t hba_flag;	/* hba generic flags */
+#define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
+#define DEFER_ERATT		0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
+#define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT	0x20
+#define ELS_XRI_ABORT_EVENT	0x40
+#define ASYNC_EVENT		0x80
+#define LINK_DISABLED		0x100 /* Link disabled by user */
+#define FCF_TS_INPROG           0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG           0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
+#define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
+	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
+	struct lpfc_dmabuf slim2p;
+
+	MAILBOX_t *mbox;
+	uint32_t *mbox_ext;
+	struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
+	uint32_t ha_copy;
+	struct _PCB *pcb;
+	struct _IOCB *IOCBs;
+
+	struct lpfc_dmabuf hbqslimp;
+
+	uint16_t pci_cfg_value;
+
+	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
+
+	uint32_t fc_eventTag;	/* event tag for link attention */
+	uint32_t link_events;
+
+	/* These fields used to be binfo */
+	uint32_t fc_pref_DID;	/* preferred D_ID */
+	uint8_t  fc_pref_ALPA;	/* preferred AL_PA */
+	uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
+	uint32_t fc_edtov;	/* E_D_TOV timer value */
+	uint32_t fc_arbtov;	/* ARB_TOV timer value */
+	uint32_t fc_ratov;	/* R_A_TOV timer value */
+	uint32_t fc_rttov;	/* R_T_TOV timer value */
+	uint32_t fc_altov;	/* AL_TOV timer value */
+	uint32_t fc_crtov;	/* C_R_TOV timer value */
+	uint32_t fc_citov;	/* C_I_TOV timer value */
+
+	struct serv_parm fc_fabparam;	/* fabric service parameters buffer */
+	uint8_t alpa_map[128];	/* AL_PA map from READ_LA */
+
+	uint32_t lmt;
+
+	uint32_t fc_topology;	/* link topology, from LINK INIT */
+
+	struct lpfc_stats fc_stat;
+
+	struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
+	uint32_t nport_event_cnt;	/* timestamp for nlplist entry */
+
+	uint8_t  wwnn[8];
+	uint8_t  wwpn[8];
+	uint32_t RandomData[7];
+
+	/* HBA Config Parameters */
+	uint32_t cfg_ack0;
+	uint32_t cfg_enable_npiv;
+	uint32_t cfg_enable_rrq;
+	uint32_t cfg_topology;
+	uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1		/* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2	/* Priority fcf failover */
+	uint32_t cfg_fcf_failover_policy;
+	uint32_t cfg_cr_delay;
+	uint32_t cfg_cr_count;
+	uint32_t cfg_multi_ring_support;
+	uint32_t cfg_multi_ring_rctl;
+	uint32_t cfg_multi_ring_type;
+	uint32_t cfg_poll;
+	uint32_t cfg_poll_tmo;
+	uint32_t cfg_use_msi;
+	uint32_t cfg_fcp_imax;
+	uint32_t cfg_fcp_wq_count;
+	uint32_t cfg_fcp_eq_count;
+	uint32_t cfg_sg_seg_cnt;
+	uint32_t cfg_prot_sg_seg_cnt;
+	uint32_t cfg_sg_dma_buf_size;
+	uint64_t cfg_soft_wwnn;
+	uint64_t cfg_soft_wwpn;
+	uint32_t cfg_hba_queue_depth;
+	uint32_t cfg_enable_hba_reset;
+	uint32_t cfg_enable_hba_heartbeat;
+	uint32_t cfg_enable_bg;
+	uint32_t cfg_hostmem_hgp;
+	uint32_t cfg_log_verbose;
+	uint32_t cfg_aer_support;
+	uint32_t cfg_sriov_nr_virtfn;
+	uint32_t cfg_iocb_cnt;
+	uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
+	uint32_t cfg_enable_dss;
+	lpfc_vpd_t vpd;		/* vital product data */
+
+	struct pci_dev *pcidev;
+	struct list_head      work_list;
+	uint32_t              work_ha;      /* Host Attention Bits for WT */
+	uint32_t              work_ha_mask; /* HA Bits owned by WT        */
+	uint32_t              work_hs;      /* HS stored in case of ERRAT */
+	uint32_t              work_status[2]; /* Extra status from SLIM */
+
+	wait_queue_head_t    work_waitq;
+	struct task_struct   *worker_thread;
+	unsigned long data_flags;
+
+	uint32_t hbq_in_use;		/* HBQs in use flag */
+	struct list_head rb_pend_list;  /* Received buffers to be processed */
+	uint32_t hbq_count;	        /* Count of configured HBQs */
+	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */
+
+	uint32_t fcp_qidx;		/* next work queue to post work to */
+
+	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
+	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
+	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
+					   PCI BAR0 */
+	void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
+					    PCI BAR2 */
+
+	void __iomem *MBslimaddr;	/* virtual address for mbox cmds */
+	void __iomem *HAregaddr;	/* virtual address for host attn reg */
+	void __iomem *CAregaddr;	/* virtual address for chip attn reg */
+	void __iomem *HSregaddr;	/* virtual address for host status
+					   reg */
+	void __iomem *HCregaddr;	/* virtual address for host ctl reg */
+
+	struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+	struct lpfc_pgp   *port_gp;
+	uint32_t __iomem  *hbq_put;     /* Address in SLIM to HBQ put ptrs */
+	uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
+
+	int brd_no;			/* FC board number */
+	char SerialNumber[32];		/* adapter Serial Number */
+	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
+	char ModelDesc[256];		/* Model Description */
+	char ModelName[80];		/* Model Name */
+	char ProgramType[256];		/* Program Type */
+	char Port[20];			/* Port No */
+	uint8_t vpd_flag;               /* VPD data flag */
+
+#define VPD_MODEL_DESC      0x1         /* valid vpd model description */
+#define VPD_MODEL_NAME      0x2         /* valid vpd model name */
+#define VPD_PROGRAM_TYPE    0x4         /* valid vpd program type */
+#define VPD_PORT            0x8         /* valid vpd port data */
+#define VPD_MASK            0xf         /* mask for any vpd data */
+
+	uint8_t soft_wwn_enable;
+
+	struct timer_list fcp_poll_timer;
+	struct timer_list eratt_poll;
+
+	/*
+	 * stat  counters
+	 */
+	uint64_t fc4InputRequests;
+	uint64_t fc4OutputRequests;
+	uint64_t fc4ControlRequests;
+	uint64_t bg_guard_err_cnt;
+	uint64_t bg_apptag_err_cnt;
+	uint64_t bg_reftag_err_cnt;
+
+	/* fastpath list. */
+	spinlock_t scsi_buf_list_lock;
+	struct list_head lpfc_scsi_buf_list;
+	uint32_t total_scsi_bufs;
+	struct list_head lpfc_iocb_list;
+	uint32_t total_iocbq_bufs;
+	struct list_head active_rrq_list;
+	spinlock_t hbalock;
+
+	/* pci_mem_pools */
+	struct pci_pool *lpfc_scsi_dma_buf_pool;
+	struct pci_pool *lpfc_mbuf_pool;
+	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
+	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
+	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
+
+	mempool_t *mbox_mem_pool;
+	mempool_t *nlp_mem_pool;
+	mempool_t *rrq_pool;
+
+	struct fc_host_statistics link_stats;
+	enum intr_type_t intr_type;
+	uint32_t intr_mode;
+#define LPFC_INTR_ERROR	0xFFFFFFFF
+	struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
+
+	struct list_head port_list;
+	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
+	uint16_t max_vpi;		/* Maximum virtual nports */
+#define LPFC_MAX_VPI 0xFFFF		/* Max number of VPI supported */
+	uint16_t max_vports;            /*
+					 * For IOV HBAs max_vpi can change
+					 * after a reset. max_vports is max
+					 * number of vports present. This can
+					 * be greater than max_vpi.
+					 */
+	uint16_t vpi_base;
+	uint16_t vfi_base;
+	unsigned long *vpi_bmask;	/* vpi allocation table */
+	uint16_t *vpi_ids;
+	uint16_t vpi_count;
+	struct list_head lpfc_vpi_blk_list;
+
+	/* Data structure used by fabric iocb scheduler */
+	struct list_head fabric_iocb_list;
+	atomic_t fabric_iocb_count;
+	struct timer_list fabric_block_timer;
+	unsigned long bit_flags;
+#define	FABRIC_COMANDS_BLOCKED	0
+	atomic_t num_rsrc_err;
+	atomic_t num_cmd_success;
+	unsigned long last_rsrc_error_time;
+	unsigned long last_ramp_down_time;
+	unsigned long last_ramp_up_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct dentry *hba_debugfs_root;
+	atomic_t debugfs_vport_count;
+	struct dentry *debug_hbqinfo;
+	struct dentry *debug_dumpHostSlim;
+	struct dentry *debug_dumpHBASlim;
+	struct dentry *debug_dumpData;   /* BlockGuard BPL */
+	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
+	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
+	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
+	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
+	struct dentry *debug_writeGuard; /* inject write guard_tag errors */
+	struct dentry *debug_writeApp;   /* inject write app_tag errors */
+	struct dentry *debug_writeRef;   /* inject write ref_tag errors */
+	struct dentry *debug_readGuard;  /* inject read guard_tag errors */
+	struct dentry *debug_readApp;    /* inject read app_tag errors */
+	struct dentry *debug_readRef;    /* inject read ref_tag errors */
+
+	/* T10 DIF error injection */
+	uint32_t lpfc_injerr_wgrd_cnt;
+	uint32_t lpfc_injerr_wapp_cnt;
+	uint32_t lpfc_injerr_wref_cnt;
+	uint32_t lpfc_injerr_rgrd_cnt;
+	uint32_t lpfc_injerr_rapp_cnt;
+	uint32_t lpfc_injerr_rref_cnt;
+	uint32_t lpfc_injerr_nportid;
+	struct lpfc_name lpfc_injerr_wwpn;
+	sector_t lpfc_injerr_lba;
+#define LPFC_INJERR_LBA_OFF	(sector_t)(-1)
+
+	struct dentry *debug_slow_ring_trc;
+	struct lpfc_debugfs_trc *slow_ring_trc;
+	atomic_t slow_ring_trc_cnt;
+	/* iDiag debugfs sub-directory */
+	struct dentry *idiag_root;
+	struct dentry *idiag_pci_cfg;
+	struct dentry *idiag_bar_acc;
+	struct dentry *idiag_que_info;
+	struct dentry *idiag_que_acc;
+	struct dentry *idiag_drb_acc;
+	struct dentry *idiag_ctl_acc;
+	struct dentry *idiag_mbx_acc;
+	struct dentry *idiag_ext_acc;
+#endif
+
+	/* Used for deferred freeing of ELS data buffers */
+	struct list_head elsbuf;
+	int elsbuf_cnt;
+	int elsbuf_prev_cnt;
+
+	uint8_t temp_sensor_support;
+	/* Fields used for heart beat. */
+	unsigned long last_completion_time;
+	unsigned long skipped_hb;
+	struct timer_list hb_tmofunc;
+	uint8_t hb_outstanding;
+	struct timer_list rrq_tmr;
+	enum hba_temp_state over_temp_state;
+	/* ndlp reference management */
+	spinlock_t ndlp_lock;
+	/*
+	 * Following bit will be set for all buffer tags which are not
+	 * associated with any HBQ.
+	 */
+#define QUE_BUFTAG_BIT  (1<<31)
+	uint32_t buffer_tag_count;
+	int wait_4_mlo_maint_flg;
+	wait_queue_head_t wait_4_mlo_m_q;
+	/* data structure used for latency data collection */
+#define LPFC_NO_BUCKET	   0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+	uint8_t  bucket_type;
+	uint32_t bucket_base;
+	uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time */
+#define LPFC_MAX_EVT_COUNT 512
+	atomic_t fast_event_count;
+	uint32_t fcoe_eventtag;
+	uint32_t fcoe_eventtag_at_fcf_scan;
+	uint32_t fcoe_cvl_eventtag;
+	uint32_t fcoe_cvl_eventtag_attn;
+	struct lpfc_fcf fcf;
+	uint8_t fc_map[3];
+	uint8_t valid_vlan;
+	uint16_t vlan_id;
+	struct list_head fcf_conn_rec_list;
+
+	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
+	struct list_head ct_ev_waiters;
+	struct unsol_rcv_ct_ctx ct_ctx[64];
+	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
+	uint32_t iocb_cnt;
+	uint32_t iocb_max;
+	atomic_t sdev_cnt;
+	uint8_t fips_spec_rev;
+	uint8_t fips_level;
+};
+
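+/*
+ * The lpfc_vport for a port lives in the Scsi_Host private data area
+ * (shost->hostdata), so container_of() on hostdata[0] recovers the
+ * Scsi_Host that owns a given vport.
+ */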
+static inline struct Scsi_Host *
+lpfc_shost_from_vport(struct lpfc_vport *vport)
+{
+	return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
+}
+
+static inline void
+lpfc_set_loopback_flag(struct lpfc_hba *phba)
+{
+	if (phba->cfg_topology == FLAGS_LOCAL_LB)
+		phba->link_flag |= LS_LOOPBACK_MODE;
+	else
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+}
+
+static inline int
+lpfc_is_link_up(struct lpfc_hba *phba)
+{
+	return  phba->link_state == LPFC_LINK_UP ||
+		phba->link_state == LPFC_CLEAR_LA ||
+		phba->link_state == LPFC_HBA_READY;
+}
+
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+	/* Set the lpfc data pending flag */
+	set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+	/* Wake up worker thread */
+	wake_up(&phba->work_waitq);
+	return;
+}
+
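+/*
+ * Read a 32-bit register and treat an all-ones value as an error, e.g.
+ * the adapter has been unplugged or has stopped responding on the bus.
+ */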
+static inline int
+lpfc_readl(void __iomem *addr, uint32_t *data)
+{
+	uint32_t temp;
+	temp = readl(addr);
+	if (temp == 0xffffffff)
+		return -EIO;
+	*data = temp;
+	return 0;
+}
+
+static inline int
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+	/*
+	 * There was a link/board error. Read the status register to retrieve
+	 * the error event and process it.
+	 */
+	phba->sli.slistat.err_attn_event++;
+
+	/* Save status info and check for unplug error */
+	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
+		lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
+		lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
+		return -EIO;
+	}
+
+	/* Clear chip Host Attention error bit */
+	writel(HA_ERATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	phba->pport->stopped = 1;
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_attr.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_attr.c
new file mode 100644
index 0000000..5eb2bc1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_attr.c
@@ -0,0 +1,4950 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/aer.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+
+#define LPFC_DEF_DEVLOSS_TMO 30
+#define LPFC_MIN_DEVLOSS_TMO 1
+#define LPFC_MAX_DEVLOSS_TMO 255
+
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE	4
+#define LPFC_REG_WRITE_KEY	"EMLX"
+
+/**
+ * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
+ * @incr: integer to convert.
+ * @hdw: ascii string holding converted integer plus a string terminator.
+ *
+ * Description:
+ * JEDEC: Joint Electron Device Engineering Council.
+ * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
+ * character string. The string is then terminated with a NULL in byte 9.
+ * Hex 0-9 becomes ascii '0' to '9'.
+ * Hex a-f becomes ascii 'a' to 'f'.
+ * For example, 0x12abcdef is rendered as the string "12abcdef".
+ *
+ * Notes:
+ * Coded for 32 bit integers only.
+ **/
+static void
+lpfc_jedec_to_ascii(int incr, char hdw[])
+{
+	int i, j;
+	for (i = 0; i < 8; i++) {
+		j = (incr & 0xf);
+		if (j <= 9)
+			hdw[7 - i] = 0x30 + j;
+		else
+			hdw[7 - i] = 0x61 + j - 10;
+		incr = (incr >> 4);
+	}
+	hdw[8] = 0;
+	return;
+}
+
+/**
+ * lpfc_drvr_version_show - Return the Emulex driver string with version number
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
+}
+
+/**
+ * lpfc_enable_fip_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->hba_flag & HBA_FIP_SUPPORT)
+		return snprintf(buf, PAGE_SIZE, "1\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
+static ssize_t
+lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->cfg_enable_bg)
+		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+			return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+		else
+			return snprintf(buf, PAGE_SIZE,
+					"BlockGuard Not Supported\n");
+	else
+		return snprintf(buf, PAGE_SIZE,
+				"BlockGuard Disabled\n");
+}
+
+static ssize_t
+lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_guard_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_apptag_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n",
+			(unsigned long long)phba->bg_reftag_err_cnt);
+}
+
+/**
+ * lpfc_info_show - Return some pci info about the host in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text from lpfc_info().
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_info_show(struct device *dev, struct device_attribute *attr,
+	       char *buf)
+{
+	struct Scsi_Host *host = class_to_shost(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
+}
+
+/**
+ * lpfc_serialnum_show - Return the hba serial number in ascii
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text serial number.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
+}
+
+/**
+ * lpfc_temp_sensor_show - Return the temperature sensor level
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns a number indicating the temperature sensor level currently
+ * supported, zero or one in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
+}
+
+/**
+ * lpfc_modeldesc_show - Return the model description of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model description.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
+}
+
+/**
+ * lpfc_modelname_show - Return the model name of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model name.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
+}
+
+/**
+ * lpfc_programtype_show - Return the program type of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
+}
+
+/**
+ * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the Menlo Maintenance sli flag.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+
+/**
+ * lpfc_vportnum_show - Return the port number in ascii of the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the port number in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
+}
+
+/**
+ * lpfc_fwrev_show - Return the firmware rev running in the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the firmware revision string.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t if_type;
+	uint8_t sli_family;
+	char fwrev[FW_REV_STR_SIZE];
+	int len;
+
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if_type = phba->sli4_hba.pc_sli4_params.if_type;
+	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+			       fwrev, phba->sli_rev);
+	else
+		len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+			       fwrev, phba->sli_rev, if_type, sli_family);
+
+	return len;
+}
+
+/**
+ * lpfc_hdw_show - Return the jedec information about the hba
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the jedec hardware revision string.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	char hdw[9];
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	lpfc_vpd_t *vp = &phba->vpd;
+
+	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
+	return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
+}
+
+/**
+ * lpfc_option_rom_version_show - Return the adapter ROM FCode version
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ROM and FCode ascii strings.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
+}
+
+/**
+ * lpfc_state_show - Return the link state of the port
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Notes:
+ * The switch statement has no default so zero will be returned.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int  len = 0;
+
+	switch (phba->link_state) {
+	case LPFC_LINK_UNKNOWN:
+	case LPFC_WARM_START:
+	case LPFC_INIT_START:
+	case LPFC_INIT_MBX_CMDS:
+	case LPFC_LINK_DOWN:
+	case LPFC_HBA_ERROR:
+		if (phba->hba_flag & LINK_DISABLED)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down - User disabled\n");
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down\n");
+		break;
+	case LPFC_LINK_UP:
+	case LPFC_CLEAR_LA:
+	case LPFC_HBA_READY:
+		len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
+
+		switch (vport->port_state) {
+		case LPFC_LOCAL_CFG_LINK:
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"Configuring Link\n");
+			break;
+		case LPFC_FDISC:
+		case LPFC_FLOGI:
+		case LPFC_FABRIC_CFG_LINK:
+		case LPFC_NS_REG:
+		case LPFC_NS_QRY:
+		case LPFC_BUILD_DISC_LIST:
+		case LPFC_DISC_AUTH:
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"Discovery\n");
+			break;
+		case LPFC_VPORT_READY:
+			len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+			break;
+
+		case LPFC_VPORT_FAILED:
+			len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+			break;
+
+		case LPFC_VPORT_UNKNOWN:
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"Unknown\n");
+			break;
+		}
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"   Menlo Maint Mode\n");
+		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			if (vport->fc_flag & FC_PUBLIC_LOOP)
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Public Loop\n");
+			else
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Private Loop\n");
+		} else {
+			if (vport->fc_flag & FC_FABRIC)
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Fabric\n");
+			else
+				len += snprintf(buf + len, PAGE_SIZE-len,
+						"   Point-2-Point\n");
+		}
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_sli4_protocol_show - Return the link protocol of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the protocol type text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return snprintf(buf, PAGE_SIZE, "fc\n");
+
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+			return snprintf(buf, PAGE_SIZE, "fcoe\n");
+		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+			return snprintf(buf, PAGE_SIZE, "fc\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the string "up" or "down".
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	int status = -EINVAL;
+
+	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+			(phba->link_state == LPFC_LINK_DOWN))
+		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+			(phba->link_state >= LPFC_LINK_UP))
+		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
+
+/**
+ * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped nodes
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the sum of the mapped and unmapped node counts.
+ *
+ * Description:
+ * Returns the ascii text number of the sum of the fc mapped and unmapped
+ * node counts for the vport.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_num_discovered_ports_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			vport->fc_map_cnt + vport->fc_unmap_cnt);
+}
+
+/**
+ * lpfc_issue_lip - Misnomer, name carried over from long ago
+ * @shost: Scsi_Host pointer.
+ *
+ * Description:
+ * Bring the link down gracefully then re-init the link. The firmware will
+ * re-init the Fibre Channel interface as required. Does not issue a LIP.
+ *
+ * Returns:
+ * -EPERM port offline or management commands are being blocked
+ * -ENOMEM cannot allocate memory for the mailbox command
+ * -EIO error sending the mailbox command
+ * zero for success
+ **/
+static int
+lpfc_issue_lip(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	LPFC_MBOXQ_t *pmboxq;
+	int mbxstatus = MBXERR_ERROR;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
+		return -EPERM;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmboxq)
+		return -ENOMEM;
+
+	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
+
+	if ((mbxstatus == MBX_SUCCESS) &&
+	    (pmboxq->u.mb.mbxStatus == 0 ||
+	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
+		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
+			       phba->cfg_link_speed);
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     phba->fc_ratov * 2);
+		if ((mbxstatus == MBX_SUCCESS) &&
+		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"2859 SLI authentication is required "
+					"for INIT_LINK but has not been done yet\n");
+	}
+
+	lpfc_set_loopback_flag(phba);
+	if (mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	if (mbxstatus == MBXERR_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_do_offline - Issues a mailbox command to bring the link down
+ * @phba: lpfc_hba pointer.
+ * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
+ *
+ * Notes:
+ * Callers assume any error from lpfc_do_offline() will be negative.
+ * Can wait up to 5 seconds for the port ring buffer counts
+ * to reach zero; prints a warning if they are not zero and continues.
+ * lpfc_workq_post_event() returns zero if it fails to post the event.
+ *
+ * Returns:
+ * -ENOMEM error posting the event
+ * -EIO the posted event completed with an error
+ * zero for success
+ **/
+static int
+lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
+{
+	struct completion online_compl;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_sli *psli;
+	int status = 0;
+	int cnt = 0;
+	int i;
+	int rc;
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_OFFLINE_PREP);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	psli = &phba->sli;
+
+	/* Wait a little for things to settle down, but not
+	 * long enough for dev loss timeout to expire.
+	 */
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		while (pring->txcmplq_cnt) {
+			msleep(10);
+			if (cnt++ > 500) {  /* 5 secs */
+				lpfc_printf_log(phba,
+					KERN_WARNING, LOG_INIT,
+					"0466 Outstanding IO when "
+					"bringing Adapter offline\n");
+				break;
+			}
+		}
+	}
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_selective_reset - Offline then onlines the port
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * If the port is configured to allow a reset then the hba is brought
+ * offline then online.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
+ *
+ * Returns:
+ * -EACCES if the host reset is not enabled
+ * lpfc_do_offline() return code if not zero
+ * -ENOMEM error posting the online event
+ * -EIO the online event completed with an error
+ * zero for success
+ **/
+int
+lpfc_selective_reset(struct lpfc_hba *phba)
+{
+	struct completion online_compl;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+	if (status != 0)
+		return status;
+
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &status, &online_compl,
+			      LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+
+	if (status != 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_issue_reset - Selectively resets an adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the buf contains the string "selective" then lpfc_selective_reset()
+ * is called to perform the reset.
+ *
+ * Notes:
+ * Assumes any error from lpfc_selective_reset() will be negative.
+ * If lpfc_selective_reset() returns zero then the length of the buffer
+ * is returned, which indicates success.
+ *
+ * Returns:
+ * -EACCES if the host reset is not enabled
+ * -EINVAL if the buffer does not contain the string "selective"
+ * length of buf if lpfc_selective_reset() succeeds
+ * return value of lpfc_selective_reset() if the call fails
+**/
+static ssize_t
+lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
+		 const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int status = -EINVAL;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
+		status = phba->lpfc_selective_reset(phba);
+
+	if (status == 0)
+		return strlen(buf);
+	else
+		return status;
+}
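+
+/*
+ * Illustrative usage only (host0 is an example; the host number varies
+ * per system).  Any string other than "selective" is rejected with -EINVAL:
+ *
+ *   echo selective > /sys/class/scsi_host/host0/issue_reset
+ */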
+
+/**
+ * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Waits on the SLI4 interface type-2 device's sliport status register to
+ * report readiness after performing a firmware reset.
+ *
+ * Returns:
+ * zero for success, -EPERM when the port does not have the privilege to
+ * perform the reset, -EIO when the port times out recovering from the reset.
+ *
+ * Note:
+ * As the caller will interpret the return code by value, be careful in making
+ * change or addition to return codes.
+ **/
+int
+lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
+{
+	struct lpfc_register portstat_reg = {0};
+	int i;
+
+	msleep(100);
+	lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+		   &portstat_reg.word0);
+
+	/* verify whether the port is privileged for the requested operation */
+	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
+	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
+		return -EPERM;
+
+	/* wait for the SLI port firmware ready after firmware reset */
+	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
+		msleep(10);
+		lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			   &portstat_reg.word0);
+		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
+			continue;
+		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
+			continue;
+		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
+			continue;
+		break;
+	}
+
+	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
+		return 0;
+	else
+		return -EIO;
+}
+
+/**
+ * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
+ * @phba: lpfc_hba pointer.
+ * @opcode: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET.
+ *
+ * Description:
+ * Request SLI4 interface type-2 device to perform a physical register set
+ * access.
+ *
+ * Returns:
+ * zero for success, a negative error code on failure
+ **/
+static ssize_t
+lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
+{
+	struct completion online_compl;
+	struct pci_dev *pdev = phba->pcidev;
+	uint32_t before_fc_flag;
+	uint32_t sriov_nr_virtfn;
+	uint32_t reg_val;
+	int status = 0, rc = 0;
+	int job_posted = 1, sriov_err;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
+	if ((phba->sli_rev < LPFC_SLI_REV4) ||
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	     LPFC_SLI_INTF_IF_TYPE_2))
+		return -EPERM;
+
+	/* Keep state if we need to restore back */
+	before_fc_flag = phba->pport->fc_flag;
+	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
+	/* Disable SR-IOV virtual functions if enabled */
+	if (phba->cfg_sriov_nr_virtfn) {
+		pci_disable_sriov(pdev);
+		phba->cfg_sriov_nr_virtfn = 0;
+	}
+	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+	if (status != 0)
+		return status;
+
+	/* wait for the device to be quiesced before firmware reset */
+	msleep(100);
+
+	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PDEV_CTL_OFFSET);
+
+	if (opcode == LPFC_FW_DUMP)
+		reg_val |= LPFC_FW_DUMP_REQUEST;
+	else if (opcode == LPFC_FW_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
+	else if (opcode == LPFC_DV_RESET)
+		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
+
+	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
+	       LPFC_CTL_PDEV_CTL_OFFSET);
+	/* flush */
+	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+	/* delay driver action following IF_TYPE_2 reset */
+	rc = lpfc_sli4_pdev_status_reg_wait(phba);
+
+	if (rc == -EPERM) {
+		/* no privilege for reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3150 No privilege to perform the requested "
+				"access: x%x\n", reg_val);
+	} else if (rc == -EIO) {
+		/* reset failed, there is nothing more we can do */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3153 Failed to perform the requested "
+				"access: x%x\n", reg_val);
+		return rc;
+	}
+
+	/* keep the original port state */
+	if (before_fc_flag & FC_OFFLINE_MODE)
+		goto out;
+
+	init_completion(&online_compl);
+	job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+					   LPFC_EVT_ONLINE);
+	if (!job_posted)
+		goto out;
+
+	wait_for_completion(&online_compl);
+
+out:
+	/* in any case, restore the virtual functions enabled as before */
+	if (sriov_nr_virtfn) {
+		sriov_err =
+			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+		if (!sriov_err)
+			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+	}
+
+	/* return proper error code */
+	if (!rc) {
+		if (!job_posted)
+			rc = -ENOMEM;
+		else if (status)
+			rc = -EIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_nport_evt_cnt_show - Return the number of nport events
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ascii number of nport events.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+/**
+ * lpfc_board_mode_show - Return the state of the board
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the state of the adapter.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	char *state;
+
+	if (phba->link_state == LPFC_HBA_ERROR)
+		state = "error";
+	else if (phba->link_state == LPFC_WARM_START)
+		state = "warm start";
+	else if (phba->link_state == LPFC_INIT_START)
+		state = "offline";
+	else
+		state = "online";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+/**
+ * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing one of the strings "online", "offline", "warm" or "error".
+ * @count: unused variable.
+ *
+ * Returns:
+ * -EACCES if enable hba reset not enabled
+ * -EINVAL if the buffer does not contain a valid string (see above)
+ * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
+ * buf length greater than zero indicates success
+ **/
+static ssize_t
+lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct completion online_compl;
+	char *board_mode_str = NULL;
+	int status = 0;
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset) {
+		status = -EACCES;
+		goto board_mode_out;
+	}
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "3050 lpfc_board_mode set to %s\n", buf);
+
+	init_completion(&online_compl);
+
+	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
+		rc = lpfc_workq_post_event(phba, &status, &online_compl,
+				      LPFC_EVT_ONLINE);
+		if (rc == 0) {
+			status = -ENOMEM;
+			goto board_mode_out;
+		}
+		wait_for_completion(&online_compl);
+	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
+		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			status = -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
+	} else if (strncmp(buf, "error", sizeof("error") - 1) == 0) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			status = -EINVAL;
+		else
+			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
+	} else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
+	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
+	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
+		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+	else
+		status = -EINVAL;
+
+board_mode_out:
+	if (!status)
+		return strlen(buf);
+	else {
+		board_mode_str = strchr(buf, '\n');
+		if (board_mode_str)
+			*board_mode_str = '\0';
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "3097 Failed \"%s\", status(%d), "
+				 "fc_flag(x%x)\n",
+				 buf, status, phba->pport->fc_flag);
+		return status;
+	}
+}
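+
+/*
+ * Illustrative usage only (host0 is an example; the host number varies
+ * per system).  "online", "offline", "warm", "error", "dump", "fw_reset"
+ * and "dv_reset" are the accepted strings:
+ *
+ *   echo offline  > /sys/class/scsi_host/host0/board_mode
+ *   echo online   > /sys/class/scsi_host/host0/board_mode
+ *   echo fw_reset > /sys/class/scsi_host/host0/board_mode
+ */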
+
+/**
+ * lpfc_get_hba_info - Return various bits of information about the adapter
+ * @phba: pointer to the adapter structure.
+ * @mxri: max xri count.
+ * @axri: available xri count.
+ * @mrpi: max rpi count.
+ * @arpi: available rpi count.
+ * @mvpi: max vpi count.
+ * @avpi: available vpi count.
+ *
+ * Description:
+ * If an integer pointer for a count is not NULL then the value for that
+ * count is returned.
+ *
+ * Returns:
+ * zero on error
+ * one for success
+ **/
+static int
+lpfc_get_hba_info(struct lpfc_hba *phba,
+		  uint32_t *mxri, uint32_t *axri,
+		  uint32_t *mrpi, uint32_t *arpi,
+		  uint32_t *mvpi, uint32_t *avpi)
+{
+	struct lpfc_mbx_read_config *rd_config;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	int rc = 0;
+	uint32_t max_vpi;
+
+	/*
+	 * prevent udev from issuing mailbox commands until the port is
+	 * configured.
+	 */
+	if (phba->link_state < LPFC_LINK_DOWN ||
+	    !phba->mbox_mem_pool ||
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+		return 0;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return 0;
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_CONFIG;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		rc = MBX_NOT_FINISHED;
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return 0;
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rd_config = &pmboxq->u.mqe.un.rd_config;
+		if (mrpi)
+			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		if (arpi)
+			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.rpi_used;
+		if (mxri)
+			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		if (axri)
+			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+					phba->sli4_hba.max_cfg_param.xri_used;
+
+		/* Account for differences with SLI-3.  Get vpi count from
+		 * mailbox data and subtract one for max vpi value.
+		 */
+		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
+			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+
+		if (mvpi)
+			*mvpi = max_vpi;
+		if (avpi)
+			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+	} else {
+		if (mrpi)
+			*mrpi = pmb->un.varRdConfig.max_rpi;
+		if (arpi)
+			*arpi = pmb->un.varRdConfig.avail_rpi;
+		if (mxri)
+			*mxri = pmb->un.varRdConfig.max_xri;
+		if (axri)
+			*axri = pmb->un.varRdConfig.avail_xri;
+		if (mvpi)
+			*mvpi = pmb->un.varRdConfig.max_vpi;
+		if (avpi)
+			*avpi = pmb->un.varRdConfig.avail_vpi;
+	}
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return 1;
+}
+
+/**
+ * lpfc_max_rpi_show - Return maximum rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_rpi_show - Return maximum rpi minus available rpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_xri_show - Return maximum xri
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_xri_show - Return maximum xri minus the available xri
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_max_vpi_show - Return maximum vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
+		return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t cnt, acnt;
+
+	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+		return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+	return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+/**
+ * lpfc_npiv_info_show - Return text about NPIV support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: text that must be interpreted to determine if npiv is supported.
+ *
+ * Description:
+ * Buffer will contain text indicating npiv is not supported on the port,
+ * the port is an NPIV physical port, or it is an npiv virtual port with
+ * the id of the vport.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (!(phba->max_vpi))
+		return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+	return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+}
+
+/**
+ * lpfc_poll_show - Return text about poll support for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the cfg_poll in hex.
+ *
+ * Notes:
+ * cfg_poll should be a lpfc_polling_flags type.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_poll_show(struct device *dev, struct device_attribute *attr,
+	       char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
+}
+
+/**
+ * lpfc_poll_store - Set the value of cfg_poll for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Notes:
+ * buf contents converted to integer and checked for a valid value.
+ *
+ * Returns:
+ * -EINVAL if the buffer cannot be converted or is out of range
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_poll_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t creg_val;
+	uint32_t old_val;
+	int val=0;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if ((val & 0x3) != val)
+		return -EINVAL;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		val = 0;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+		"3051 lpfc_poll changed from %d to %d\n",
+		phba->cfg_poll, val);
+
+	spin_lock_irq(&phba->hbalock);
+
+	old_val = phba->cfg_poll;
+
+	if (val & ENABLE_FCP_RING_POLLING) {
+		if ((val & DISABLE_FCP_RING_INT) &&
+		    !(old_val & DISABLE_FCP_RING_INT)) {
+			if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+				spin_unlock_irq(&phba->hbalock);
+				return -EINVAL;
+			}
+			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+			writel(creg_val, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+
+			lpfc_poll_start_timer(phba);
+		}
+	} else if (val != 0x0) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EINVAL;
+	}
+
+	if (!(val & DISABLE_FCP_RING_INT) &&
+	    (old_val & DISABLE_FCP_RING_INT))
+	{
+		spin_unlock_irq(&phba->hbalock);
+		del_timer(&phba->fcp_poll_timer);
+		spin_lock_irq(&phba->hbalock);
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			spin_unlock_irq(&phba->hbalock);
+			return -EINVAL;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	phba->cfg_poll = val;
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return strlen(buf);
+}
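+
+/*
+ * Illustrative usage only (host0 is an example; the host number varies
+ * per system).  The values mirror the lpfc_poll module parameter:
+ *
+ *   echo 0x1 > /sys/class/scsi_host/host0/lpfc_poll    (poll, interrupts on)
+ *   echo 0x3 > /sys/class/scsi_host/host0/lpfc_poll    (poll, FCP ring interrupts off)
+ *   echo 0x0 > /sys/class/scsi_host/host0/lpfc_poll    (polling disabled)
+ */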
+
+/**
+ * lpfc_fips_level_show - Return the current FIPS level for the HBA
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS level in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_level_show(struct device *dev,  struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
+}
+
+/**
+ * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the FIPS Spec revision in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fips_rev_show(struct device *dev,  struct device_attribute *attr,
+		   char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
+}
+
+/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+	      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+			(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+			(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+				"" : "Not ");
+}
+
+/**
+ * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns the maximum number of virtual functions a physical function can
+ * support; 0 will be returned if called on a virtual function.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sriov_hw_max_virtfn_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	uint16_t max_nr_virtfn;
+
+	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
+}
+
+/**
+ * lpfc_param_show - Return a cfg attribute value in decimal
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	uint val = 0;\
+	val = phba->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%d\n",\
+			phba->cfg_##attr);\
+}
+
+/**
+ * lpfc_param_hex_show - Return a cfg attribute value in hex
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_param_hex_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	uint val = 0;\
+	val = phba->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%#x\n",\
+			phba->cfg_##attr);\
+}
+
+/**
+ * lpfc_param_init - Initializes a cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: Initializes an attribute.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Validates the min and max values then sets the adapter config field
+ * accordingly, or uses the default if out of range and prints an error message.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_param_init(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		phba->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"0449 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", val); \
+	phba->cfg_##attr = default;\
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_param_set - Set a cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the
+ * adapter config field if it is in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_param_set(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"3052 lpfc_" #attr " changed from %d to %d\n", \
+			phba->cfg_##attr, val); \
+		phba->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+			"0450 lpfc_"#attr" attribute cannot be set to %d, "\
+			"allowed range is ["#minval", "#maxval"]\n", val); \
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_param_store - Set a vport attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_store.
+ *
+ * lpfc_##attr##_store: Set an attribute value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in ascii.
+ * @count: not used.
+ *
+ * Description:
+ * Convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_param_store(attr)	\
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+		    const char *buf, size_t count) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	struct lpfc_hba   *phba = vport->phba;\
+	uint val = 0;\
+	if (!isdigit(buf[0]))\
+		return -EINVAL;\
+	if (sscanf(buf, "%i", &val) != 1)\
+		return -EINVAL;\
+	if (lpfc_##attr##_set(phba, val) == 0) \
+		return strlen(buf);\
+	else \
+		return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_show - Return decimal formatted cfg attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in decimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	uint val = 0;\
+	val = vport->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_hex_show - Return hex formatted attribute value
+ *
+ * Description:
+ * Macro that given an attr e.g.
+ * hba_queue_depth expands into a function with the name
+ * lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in hexadecimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: length of formatted string.
+ **/
+#define lpfc_vport_param_hex_show(attr)	\
+static ssize_t \
+lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
+		   char *buf) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	uint val = 0;\
+	val = vport->cfg_##attr;\
+	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
+}
+
+/**
+ * lpfc_vport_param_init - Initialize a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: validates the min and max values then sets the
+ * adapter config field accordingly, or uses the default if out of range
+ * and prints an error message.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
+#define lpfc_vport_param_init(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		vport->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "allowed range is ["#minval", "#maxval"]\n", val); \
+	vport->cfg_##attr = default;\
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_set - Set a vport cfg attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: validates the min and max values then sets the
+ * adapter config field if it is in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ * @phba: pointer to the adapter structure.
+ * @val:	integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
+#define lpfc_vport_param_set(attr, default, minval, maxval)	\
+static int \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
+{ \
+	if (val >= minval && val <= maxval) {\
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			"3053 lpfc_" #attr " changed from %d to %d\n", \
+			vport->cfg_##attr, val); \
+		vport->cfg_##attr = val;\
+		return 0;\
+	}\
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
+			 "allowed range is ["#minval", "#maxval"]\n", val); \
+	return -EINVAL;\
+}
+
+/**
+ * lpfc_vport_param_store - Set a vport attribute
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth
+ * expands into a function with the name lpfc_hba_queue_depth_store
+ *
+ * lpfc_##attr##_store: convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in decimal.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
+#define lpfc_vport_param_store(attr)	\
+static ssize_t \
+lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
+		    const char *buf, size_t count) \
+{ \
+	struct Scsi_Host  *shost = class_to_shost(dev);\
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+	uint val = 0;\
+	if (!isdigit(buf[0]))\
+		return -EINVAL;\
+	if (sscanf(buf, "%i", &val) != 1)\
+		return -EINVAL;\
+	if (lpfc_##attr##_set(vport, val) == 0) \
+		return strlen(buf);\
+	else \
+		return -EINVAL;\
+}
+
+
+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_init(name, defval, minval, maxval)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
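+/*
+ * Illustration only; "foo" is a hypothetical attribute, not one defined by
+ * this driver.  A single line such as
+ *
+ *   LPFC_ATTR_RW(foo, 1, 0, 2, "example description");
+ *
+ * generates the module parameter lpfc_foo, the helpers lpfc_foo_show(),
+ * lpfc_foo_init(), lpfc_foo_set() and lpfc_foo_store(), and the read/write
+ * sysfs attribute dev_attr_lpfc_foo built on top of them.
+ */
+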
+#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_init(name, defval, minval, maxval)
+
+#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+		   lpfc_##name##_show, lpfc_##name##_store)
+
+static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
+static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
+static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
+static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
+static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
+static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
+static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
+static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
+static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
+static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
+static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
+static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+		lpfc_link_state_store);
+static DEVICE_ATTR(option_rom_version, S_IRUGO,
+		   lpfc_option_rom_version_show, NULL);
+static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
+		   lpfc_num_discovered_ports_show, NULL);
+static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
+static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
+static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
+static DEVICE_ATTR(lpfc_enable_fip, S_IRUGO, lpfc_enable_fip_show, NULL);
+static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
+		   lpfc_board_mode_show, lpfc_board_mode_store);
+static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
+static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
+static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
+static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
+static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
+static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
+static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
+static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
+static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
+static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
+		   lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
+
+static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+
+/**
+ * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string lpfc_soft_wwn_key.
+ * @count: must be size of lpfc_soft_wwn_key.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
+ * length of buf indicates success
+ **/
+static ssize_t
+lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned int cnt = count;
+
+	/*
+	 * We're doing a simple sanity check for soft_wwpn setting.
+	 * We require that the user write a specific key to enable
+	 * the soft_wwpn attribute to be settable. Once the attribute
+	 * is written, the enable key resets. If further updates are
+	 * desired, the key must be written again to re-enable the
+	 * attribute.
+	 *
+	 * The "key" is not secret - it is a hardcoded string shown
+	 * here. The intent is to protect against the random user or
+	 * application that is just writing attributes.
+	 */
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt != strlen(lpfc_soft_wwn_key)) ||
+	    (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
+		return -EINVAL;
+
+	phba->soft_wwn_enable = 1;
+	return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
+		   lpfc_soft_wwn_enable_store);
+
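+/*
+ * Illustrative usage only (host0 and the WWPN value are examples):
+ *
+ *   echo C99G71SL8032A    > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
+ *   echo 10000000c9000000 > /sys/class/scsi_host/host0/lpfc_soft_wwpn
+ *
+ * The enable key must be written again before each further wwpn update.
+ */
+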
+/**
+ * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwpn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)phba->cfg_soft_wwpn);
+}
+
+/**
+ * lpfc_soft_wwpn_store - Set the ww port name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the wwpn in hexadecimal.
+ * @count: number of wwpn bytes in buf
+ *
+ * Returns:
+ * -EACCES hba reset not enabled, adapter over temp
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
+ * -EIO error taking adapter offline or online
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct completion online_compl;
+	int stat1=0, stat2=0;
+	unsigned int i, j, cnt=count;
+	u8 wwpn[8];
+	int rc;
+
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+	spin_lock_irq(&phba->hbalock);
+	if (phba->over_temp_state == HBA_OVER_TEMP) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EACCES;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
+	    ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	phba->soft_wwn_enable = 0;
+
+	memset(wwpn, 0, sizeof(wwpn));
+
+	/* Validate and store the new name */
+	for (i=0, j=0; i < 16; i++) {
+		int value;
+
+		value = hex_to_bin(*buf++);
+		if (value >= 0)
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwpn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
+	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
+	if (phba->cfg_soft_wwnn)
+		fc_host_node_name(shost) = phba->cfg_soft_wwnn;
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
+
+	stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+	if (stat1)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0463 lpfc_soft_wwpn attribute set failed to "
+				"reinit adapter - %d\n", stat1);
+	init_completion(&online_compl);
+	rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+				   LPFC_EVT_ONLINE);
+	if (rc == 0)
+		return -ENOMEM;
+
+	wait_for_completion(&online_compl);
+	if (stat2)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0464 lpfc_soft_wwpn attribute set failed to "
+				"reinit adapter - %d\n", stat2);
+	return (stat1 || stat2) ? -EIO : count;
+}
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+
+/**
+ * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwnn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			(unsigned long long)phba->cfg_soft_wwnn);
+}
+
+/**
+ * lpfc_soft_wwnn_store - sets the ww node name of the adapter
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the ww node name in hexadecimal.
+ * @count: number of wwnn bytes in buf.
+ *
+ * Returns:
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
+ * value of count on success
+ **/
+static ssize_t
+lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int i, j, cnt=count;
+	u8 wwnn[8];
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
+	    ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	/*
+	 * Allow wwnn to be set many times, as long as the enable is set.
+	 * However, once the wwpn is set, everything locks.
+	 */
+
+	memset(wwnn, 0, sizeof(wwnn));
+
+	/* Validate and store the new name */
+	for (i=0, j=0; i < 16; i++) {
+		int value;
+
+		value = hex_to_bin(*buf++);
+		if (value >= 0)
+			j = (j << 4) | value;
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwnn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
+
+	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+		   "lpfc%d: soft_wwnn set. Value will take effect upon "
+		   "setting of the soft_wwpn\n", phba->brd_no);
+
+	return count;
+}
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+
+
+static int lpfc_poll = 0;
+module_param(lpfc_poll, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
+		 " 0 - none,"
+		 " 1 - poll with interrupts enabled,"
+		 " 3 - poll and disable FCP ring interrupts");
+
+static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
+		   lpfc_poll_show, lpfc_poll_store);
+
+int  lpfc_sli_mode = 0;
+module_param(lpfc_sli_mode, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
+		 " 0 - auto (SLI-3 if supported),"
+		 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
+		 " 3 - select SLI-3");
+
+int lpfc_enable_npiv = 1;
+module_param(lpfc_enable_npiv, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
+lpfc_param_show(enable_npiv);
+lpfc_param_init(enable_npiv, 1, 0, 1);
+static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+	"FCF Fast failover=1 Priority failover=2");
+
+int lpfc_enable_rrq;
+module_param(lpfc_enable_rrq, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
+lpfc_param_show(enable_rrq);
+lpfc_param_init(enable_rrq, 0, 0, 1);
+static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+
+/*
+# lpfc_suppress_link_up:  Bring link up at initialization
+#            0x0  = bring link up (issue MBX_INIT_LINK)
+#            0x1  = do NOT bring link up at initialization(MBX_INIT_LINK)
+#            0x2  = never bring up link
+# Default value is 0.
+*/
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
+/*
+# lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
+#       1 - (1024)
+#       2 - (2048)
+#       3 - (3072)
+#       4 - (4096)
+#       5 - (5120)
+*/
+static ssize_t
+lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
+}
+
+static DEVICE_ATTR(iocb_hw, S_IRUGO,
+			 lpfc_iocb_hw_show, NULL);
+static ssize_t
+lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		phba->sli.ring[LPFC_ELS_RING].txq_max);
+}
+
+static DEVICE_ATTR(txq_hw, S_IRUGO,
+			 lpfc_txq_hw_show, NULL);
+static ssize_t
+lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
+}
+
+static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
+			 lpfc_txcmplq_hw_show, NULL);
+
+int lpfc_iocb_cnt = 2;
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_iocb_cnt,
+	"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
+lpfc_param_show(iocb_cnt);
+lpfc_param_init(iocb_cnt, 2, 1, 5);
+static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
+			 lpfc_iocb_cnt_show, NULL);
+
+/*
+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
+# until the timer expires. Value range is [0,255]. Default value is 30.
+*/
+static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
+module_param(lpfc_nodev_tmo, int, 0);
+MODULE_PARM_DESC(lpfc_nodev_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+
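+/*
+ * Illustrative usage only (60 is an example value within the documented
+ * [0,255] range):
+ *
+ *   modprobe lpfc lpfc_nodev_tmo=60
+ */
+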
+/**
+ * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the dev loss timeout in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
+}
+
+/**
+ * lpfc_nodev_tmo_init - Set the hba nodev timeout value
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the nodev timeout value.
+ *
+ * Description:
+ * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
+ * a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
+{
+	if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
+		if (val != LPFC_DEF_DEVLOSS_TMO)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0407 Ignoring nodev_tmo module "
+					 "parameter because devloss_tmo is "
+					 "set.\n");
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		return 0;
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0400 lpfc_nodev_tmo attribute cannot be set to"
+			 " %d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
+	return -EINVAL;
+}
+
+/**
+ * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
+ * @vport: lpfc vport structure pointer.
+ *
+ * Description:
+ * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
+ **/
+static void
+lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_nodelist  *ndlp;
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+		if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If the devloss tmo is already set or the vport dev loss tmo has changed
+ * then a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
+{
+	if (vport->dev_loss_tmo_changed ||
+	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0401 Ignoring change to nodev_tmo "
+				 "because devloss_tmo is set.\n");
+		return 0;
+	}
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		/*
+		 * For compat: set the fc_host dev loss so new rports
+		 * will get the value.
+		 */
+		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+		lpfc_update_rport_devloss_tmo(vport);
+		return 0;
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0403 lpfc_nodev_tmo attribute cannot be set to"
+			 " %d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_vport_param_store(nodev_tmo)
+
+static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+		   lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
+
+/*
+# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+# disappear until the timer expires. Value range is [0,255]. Default
+# value is 30.
+*/
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_devloss_tmo,
+		 "Seconds driver will hold I/O waiting "
+		 "for a device to come back");
+lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
+		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
+lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
+{
+	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
+		vport->cfg_nodev_tmo = val;
+		vport->cfg_devloss_tmo = val;
+		vport->dev_loss_tmo_changed = 1;
+		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
+		lpfc_update_rport_devloss_tmo(vport);
+		return 0;
+	}
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			 "0404 lpfc_devloss_tmo attribute cannot be set to"
+			 " %d, allowed range is [%d, %d]\n",
+			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
+	return -EINVAL;
+}
+
+lpfc_vport_param_store(devloss_tmo)
+static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
+		   lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
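+
+/*
+ * Illustrative usage (hostN is a placeholder for the SCSI host instance):
+ * the attributes above appear under /sys/class/scsi_host/hostN/, so a
+ * 60 second device-loss window can typically be set at runtime with
+ *     echo 60 > /sys/class/scsi_host/hostN/lpfc_devloss_tmo
+ * Note that once devloss_tmo has been set, writes to lpfc_nodev_tmo are
+ * ignored (see lpfc_nodev_tmo_set() above).
+ */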
+
+/*
+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
+# deluged with LOTS of information.
+# You can set a bit mask to record specific types of verbose messages:
+# See lpfc_logmsg.h for definitions.
+*/
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
+		       "Verbose logging bit-mask");
+
+/*
+# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
+# objects that have been registered with the nameserver after login.
+*/
+LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
+		  "Deregister nameserver objects before LOGO");
+
+/*
+# lun_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+*/
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
+		  "Max number of FCP commands we can queue to a specific LUN");
+
+/*
+# tgt_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per target port. Value range is [10,65535]. Default value is 65535.
+*/
+LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
+	"Max number of FCP commands we can queue to a specific target port");
+
+/*
+# hba_queue_depth:  This parameter is used to limit the number of outstanding
+# commands per lpfc HBA. Value range is [32,8192]. If this parameter
+# value is greater than the maximum number of exchanges supported by the HBA,
+# then maximum number of exchanges supported by the HBA is used to determine
+# the hba_queue_depth.
+*/
+LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
+	    "Max number of FCP commands we can queue to a lpfc HBA");
+
+/*
+# peer_port_login:  This parameter allows/prevents logins
+# between peer ports hosted on the same physical port.
+# When this parameter is set 0 peer ports of same physical port
+# are not allowed to login to each other.
+# When this parameter is set 1 peer ports of same physical port
+# are allowed to login to each other.
+# Default value of this parameter is 0.
+*/
+LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
+		  "Allow peer ports on the same physical port to login to each "
+		  "other.");
+
+/*
+# restrict_login:  This parameter allows/prevents logins
+# between Virtual Ports and remote initiators.
+# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
+# other initiators and will attempt to PLOGI all remote ports.
+# When this parameter is set (1) Virtual Ports will reject PLOGIs from
+# remote ports and will not attempt to PLOGI to other initiators.
+# This parameter does not apply to the physical port.
+# This parameter does not restrict logins to Fabric resident remote ports.
+# Default value of this parameter is 1.
+*/
+static int lpfc_restrict_login = 1;
+module_param(lpfc_restrict_login, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_restrict_login,
+		 "Restrict virtual ports login to remote initiators.");
+lpfc_vport_param_show(restrict_login);
+
+/**
+ * lpfc_restrict_login_init - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical clear the restrict login flag and return.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
+{
+	if (val < 0 || val > 1) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0422 lpfc_restrict_login attribute cannot "
+				 "be set to %d, allowed range is [0, 1]\n",
+				 val);
+		vport->cfg_restrict_login = 1;
+		return -EINVAL;
+	}
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		vport->cfg_restrict_login = 0;
+		return 0;
+	}
+	vport->cfg_restrict_login = val;
+	return 0;
+}
+
+/**
+ * lpfc_restrict_login_set - Set the vport restrict login flag
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical and the val is not zero log a kernel
+ * error message, clear the restrict login flag and return zero.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
+{
+	if (val < 0 || val > 1) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0425 lpfc_restrict_login attribute cannot "
+				 "be set to %d, allowed range is [0, 1]\n",
+				 val);
+		vport->cfg_restrict_login = 1;
+		return -EINVAL;
+	}
+	if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0468 lpfc_restrict_login must be 0 for "
+				 "Physical ports.\n");
+		vport->cfg_restrict_login = 0;
+		return 0;
+	}
+	vport->cfg_restrict_login = val;
+	return 0;
+}
+lpfc_vport_param_store(restrict_login);
+static DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
+		   lpfc_restrict_login_show, lpfc_restrict_login_store);
+
+/*
+# Some disk devices have a "select ID" or "select Target" capability.
+# From a protocol standpoint "select ID" usually means select the
+# Fibre channel "ALPA".  In the FC-AL Profile there is an "informative
+# annex" which contains a table that maps a "select ID" (a number
+# between 0 and 7F) to an ALPA.  By default, for compatibility with
+# older drivers, the lpfc driver scans this table from low ALPA to high
+# ALPA.
+#
+# Turning on the scan-down variable (on  = 1, off = 0) will
+# cause the lpfc driver to use an inverted table, effectively
+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
+#
+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
+# and will not work across a fabric. Also this parameter will take
+# effect only in the case when ALPA map is not available.)
+*/
+LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
+		  "Start scanning for devices from highest ALPA to lowest");
+
+/*
+# lpfc_topology:  link topology for init link
+#            0x0  = attempt loop mode then point-to-point
+#            0x01 = internal loopback mode
+#            0x02 = attempt point-to-point mode only
+#            0x04 = attempt loop mode only
+#            0x06 = attempt point-to-point mode then loop
+# Set point-to-point mode if you want to run as an N_Port.
+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
+# Default value is 0.
+*/
+
+/**
+ * lpfc_topology_store - Set the adapter's topology field via sysfs
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer containing the topology value, optionally prefixed with "nolip ".
+ * @count: size of the buffer.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's topology field and
+ * issue a lip; if the lip fails reset the topology to the old value. When
+ * the "nolip " prefix is given, the value is stored without issuing a lip.
+ *
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * length of the buf if val is in range and the lip (if issued) succeeds
+ * -EINVAL val out of range or lip failed
+ **/
+static ssize_t
+lpfc_topology_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val = 0;
+	int nolip = 0;
+	const char *val_buf = buf;
+	int err;
+	uint32_t prev_val;
+
+	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+		nolip = 1;
+		val_buf = &buf[strlen("nolip ")];
+	}
+
+	if (!isdigit(val_buf[0]))
+		return -EINVAL;
+	if (sscanf(val_buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	if (val >= 0 && val <= 6) {
+		prev_val = phba->cfg_topology;
+		phba->cfg_topology = val;
+		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+			val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3113 Loop mode not supported at speed %d\n",
+				phba->cfg_link_speed);
+			phba->cfg_topology = prev_val;
+			return -EINVAL;
+		}
+		if (nolip)
+			return strlen(buf);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+			"3054 lpfc_topology changed from %d to %d\n",
+			prev_val, val);
+		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+		if (err) {
+			phba->cfg_topology = prev_val;
+			return -EINVAL;
+		} else
+			return strlen(buf);
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"%d:0467 lpfc_topology attribute cannot be set to %d, "
+		"allowed range is [0, 6]\n",
+		phba->brd_no, val);
+	return -EINVAL;
+}
+static int lpfc_topology = 0;
+module_param(lpfc_topology, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+lpfc_param_show(topology)
+lpfc_param_init(topology, 0, 0, 6)
+static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
+		lpfc_topology_show, lpfc_topology_store);
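+
+/*
+ * Illustrative usage of the "nolip " prefix handled by lpfc_topology_store()
+ * (hostN is a placeholder for the SCSI host instance):
+ *     echo 2 > /sys/class/scsi_host/hostN/lpfc_topology          # point-to-point, issue LIP
+ *     echo "nolip 2" > /sys/class/scsi_host/hostN/lpfc_topology  # store only, no LIP
+ */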
+
+/**
+ * lpfc_static_vport_show - Read callback for the lpfc_static_vport sysfs file
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback for the
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports the manageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	if (vport->vport_flag & STATIC_VPORT)
+		sprintf(buf, "1\n");
+	else
+		sprintf(buf, "0\n");
+
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to report whether the vport is a static vport.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+		   lpfc_static_vport_show, NULL);
+
+/**
+ * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
+ *
+ * This function gets called when a user writes to the lpfc_stat_data_ctrl
+ * sysfs file. It parses the command written to the sysfs file
+ * and takes the appropriate action. These commands are used for controlling
+ * driver statistical data collection.
+ * The following are the commands this function handles.
+ *
+ *    setbucket <bucket_type> <base> <step>
+ *			       = Set the latency buckets.
+ *    destroybucket            = destroy all the buckets.
+ *    start                    = start data collection
+ *    stop                     = stop data collection
+ *    reset                    = reset the collected data
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+#define LPFC_MAX_DATA_CTRL_LEN 1024
+	static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
+	unsigned long i;
+	char *str_ptr, *token;
+	struct lpfc_vport **vports;
+	struct Scsi_Host *v_shost;
+	char *bucket_type_str, *base_str, *step_str;
+	unsigned long base, step, bucket_type;
+
+	if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
+		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
+			return -EINVAL;
+
+		strcpy(bucket_data, buf);
+		str_ptr = &bucket_data[0];
+		/* Ignore this token - this is command token */
+		token = strsep(&str_ptr, "\t ");
+		if (!token)
+			return -EINVAL;
+
+		bucket_type_str = strsep(&str_ptr, "\t ");
+		if (!bucket_type_str)
+			return -EINVAL;
+
+		if (!strncmp(bucket_type_str, "linear", strlen("linear")))
+			bucket_type = LPFC_LINEAR_BUCKET;
+		else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
+			bucket_type = LPFC_POWER2_BUCKET;
+		else
+			return -EINVAL;
+
+		base_str = strsep(&str_ptr, "\t ");
+		if (!base_str)
+			return -EINVAL;
+		base = simple_strtoul(base_str, NULL, 0);
+
+		step_str = strsep(&str_ptr, "\t ");
+		if (!step_str)
+			return -EINVAL;
+		step = simple_strtoul(step_str, NULL, 0);
+		if (!step)
+			return -EINVAL;
+
+		/* Block the data collection for every vport */
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(v_shost->host_lock);
+			/* Block and reset data collection */
+			vports[i]->stat_data_blocked = 1;
+			if (vports[i]->stat_data_enabled)
+				lpfc_vport_reset_stat_data(vports[i]);
+			spin_unlock_irq(v_shost->host_lock);
+		}
+
+		/* Set the bucket attributes */
+		phba->bucket_type = bucket_type;
+		phba->bucket_base = base;
+		phba->bucket_step = step;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+
+			/* Unblock data collection */
+			spin_lock_irq(v_shost->host_lock);
+			vports[i]->stat_data_blocked = 0;
+			spin_unlock_irq(v_shost->host_lock);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports == NULL)
+			return -ENOMEM;
+
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			v_shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(v_shost->host_lock);
+			vports[i]->stat_data_blocked = 1;
+			lpfc_free_bucket(vports[i]);
+			vports[i]->stat_data_enabled = 0;
+			vports[i]->stat_data_blocked = 0;
+			spin_unlock_irq(v_shost->host_lock);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+		phba->bucket_type = LPFC_NO_BUCKET;
+		phba->bucket_base = 0;
+		phba->bucket_step = 0;
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "start", strlen("start"))) {
+		/* If no buckets configured return error */
+		if (phba->bucket_type == LPFC_NO_BUCKET)
+			return -EINVAL;
+		spin_lock_irq(shost->host_lock);
+		if (vport->stat_data_enabled) {
+			spin_unlock_irq(shost->host_lock);
+			return strlen(buf);
+		}
+		lpfc_alloc_bucket(vport);
+		vport->stat_data_enabled = 1;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "stop", strlen("stop"))) {
+		spin_lock_irq(shost->host_lock);
+		if (vport->stat_data_enabled == 0) {
+			spin_unlock_irq(shost->host_lock);
+			return strlen(buf);
+		}
+		lpfc_free_bucket(vport);
+		vport->stat_data_enabled = 0;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+
+	if (!strncmp(buf, "reset", strlen("reset"))) {
+		if ((phba->bucket_type == LPFC_NO_BUCKET)
+			|| !vport->stat_data_enabled)
+			return strlen(buf);
+		spin_lock_irq(shost->host_lock);
+		vport->stat_data_blocked = 1;
+		lpfc_vport_reset_stat_data(vport);
+		vport->stat_data_blocked = 0;
+		spin_unlock_irq(shost->host_lock);
+		return strlen(buf);
+	}
+	return -EINVAL;
+}
+
+
+/**
+ * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
+ * @dev: Pointer to class device object.
+ * @attr: device attribute, not used.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback for the
+ * lpfc_stat_data_ctrl sysfs file. It reports the
+ * current statistical data collection state.
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int index = 0;
+	int i;
+	char *bucket_type;
+	unsigned long bucket_value;
+
+	switch (phba->bucket_type) {
+	case LPFC_LINEAR_BUCKET:
+		bucket_type = "linear";
+		break;
+	case LPFC_POWER2_BUCKET:
+		bucket_type = "power2";
+		break;
+	default:
+		bucket_type = "No Bucket";
+		break;
+	}
+
+	sprintf(&buf[index], "Statistical Data enabled :%d, "
+		"blocked :%d, Bucket type :%s, Bucket base :%d,"
+		" Bucket step :%d\nLatency Ranges :",
+		vport->stat_data_enabled, vport->stat_data_blocked,
+		bucket_type, phba->bucket_base, phba->bucket_step);
+	index = strlen(buf);
+	if (phba->bucket_type != LPFC_NO_BUCKET) {
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+			if (phba->bucket_type == LPFC_LINEAR_BUCKET)
+				bucket_value = phba->bucket_base +
+					phba->bucket_step * i;
+			else
+				bucket_value = phba->bucket_base +
+				(1 << i) * phba->bucket_step;
+
+			if (index + 10 > PAGE_SIZE)
+				break;
+			sprintf(&buf[index], "%08lu ", bucket_value);
+			index = strlen(buf);
+		}
+	}
+	sprintf(&buf[index], "\n");
+	return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
+		   lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
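+
+/*
+ * Illustrative control sequence for latency statistics (hostN is a
+ * placeholder); the commands are parsed by lpfc_stat_data_ctrl_store() and
+ * the per-target counters are read back from the lpfc_drvr_stat_data
+ * binary attribute defined below:
+ *     echo "setbucket linear 100 50" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ *     echo start > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ *     cat /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ *     echo stop > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
+ */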
+
+/*
+ * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
+ */
+
+/*
+ * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
+ * for each target.
+ */
+#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
+#define MAX_STAT_DATA_SIZE_PER_TARGET \
+	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
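+
+/*
+ * The per-target size follows from the format used by
+ * sysfs_drvr_stat_data_read() below: the 16 hex digit WWN plus ':' is 17
+ * bytes, each bucket prints "%010u," (11 bytes), and the record ends with
+ * '\n', giving NUM_BUCKETS * 11 + 18 bytes per target. The binary attribute
+ * size is therefore LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET.
+ */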
+
+
+/**
+ * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
+ * @filp: sysfs file
+ * @kobj: Pointer to the kernel object
+ * @bin_attr: Attribute object
+ * @buff: Buffer pointer
+ * @off: File offset
+ * @count: Buffer size
+ *
+ * This function is the read callback for the lpfc_drvr_stat_data
+ * sysfs file. It exports the statistical data to user
+ * applications.
+ **/
+static ssize_t
+sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
+		char *buf, loff_t off, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device,
+		kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int i = 0, index = 0;
+	unsigned long nport_index;
+	struct lpfc_nodelist *ndlp = NULL;
+	nport_index = (unsigned long)off /
+		MAX_STAT_DATA_SIZE_PER_TARGET;
+
+	if (!vport->stat_data_enabled || vport->stat_data_blocked
+		|| (phba->bucket_type == LPFC_NO_BUCKET))
+		return 0;
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+			continue;
+
+		if (nport_index > 0) {
+			nport_index--;
+			continue;
+		}
+
+		if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
+			> count)
+			break;
+
+		if (!ndlp->lat_data)
+			continue;
+
+		/* Print the WWN */
+		sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
+			ndlp->nlp_portname.u.wwn[0],
+			ndlp->nlp_portname.u.wwn[1],
+			ndlp->nlp_portname.u.wwn[2],
+			ndlp->nlp_portname.u.wwn[3],
+			ndlp->nlp_portname.u.wwn[4],
+			ndlp->nlp_portname.u.wwn[5],
+			ndlp->nlp_portname.u.wwn[6],
+			ndlp->nlp_portname.u.wwn[7]);
+
+		index = strlen(buf);
+
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+			sprintf(&buf[index], "%010u,",
+				ndlp->lat_data[i].cmd_count);
+			index = strlen(buf);
+		}
+		sprintf(&buf[index], "\n");
+		index = strlen(buf);
+	}
+	spin_unlock_irq(shost->host_lock);
+	return index;
+}
+
+static struct bin_attribute sysfs_drvr_stat_data_attr = {
+	.attr = {
+		.name = "lpfc_drvr_stat_data",
+		.mode = S_IRUSR,
+	},
+	.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
+	.read = sysfs_drvr_stat_data_read,
+	.write = NULL,
+};
+
+/*
+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
+# connection.
+# Value range is [0,16]. Default value is 0.
+*/
+/**
+ * lpfc_link_speed_store - Set the adapter's link speed via sysfs
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer containing the link speed value, optionally prefixed with "nolip ".
+ * @count: size of the buffer.
+ *
+ * Description:
+ * If val is in a valid range and supported by the port then set the adapter's
+ * link speed field and issue a lip; if the lip fails reset the link speed to
+ * the old value. When the "nolip " prefix is given, the value is stored
+ * without issuing a lip.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * length of the buf if val is in range and the lip (if issued) succeeds.
+ * -EINVAL val out of range, unsupported speed, or lip failed
+ **/
+static ssize_t
+lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val = LPFC_USER_LINK_SPEED_AUTO;
+	int nolip = 0;
+	const char *val_buf = buf;
+	int err;
+	uint32_t prev_val;
+
+	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
+		nolip = 1;
+		val_buf = &buf[strlen("nolip ")];
+	}
+
+	if (!isdigit(val_buf[0]))
+		return -EINVAL;
+	if (sscanf(val_buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+		"3055 lpfc_link_speed changed from %d to %d %s\n",
+		phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
+
+	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2879 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported by this port.\n",
+				val);
+		return -EINVAL;
+	}
+	if (val == LPFC_USER_LINK_SPEED_16G &&
+		 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3112 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported in loop mode.\n",
+				val);
+		return -EINVAL;
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+		prev_val = phba->cfg_link_speed;
+		phba->cfg_link_speed = val;
+		if (nolip)
+			return strlen(buf);
+
+		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
+		if (err) {
+			phba->cfg_link_speed = prev_val;
+			return -EINVAL;
+		} else
+			return strlen(buf);
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"0469 lpfc_link_speed attribute cannot be set to %d, "
+		"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
+	return -EINVAL;
+}
+
+static int lpfc_link_speed = 0;
+module_param(lpfc_link_speed, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
+lpfc_param_show(link_speed)
+
+/**
+ * lpfc_link_speed_init - Set the adapters link speed
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, clear the link
+ * speed and return an error.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_link_speed_init(struct lpfc_hba *phba, int val)
+{
+	if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3111 lpfc_link_speed of %d cannot "
+			"support loop mode, setting topology to default.\n",
+			 val);
+		phba->cfg_topology = 0;
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
+		phba->cfg_link_speed = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0405 lpfc_link_speed attribute cannot "
+			"be set to %d, allowed values are "
+			"["LPFC_LINK_SPEED_STRING"]\n", val);
+	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
+		   lpfc_link_speed_show, lpfc_link_speed_store);
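+
+/*
+ * Illustrative usage (hostN is a placeholder), assuming the
+ * LPFC_USER_LINK_SPEED_* values track the speed in gigabits as the [0,16]
+ * range comment above suggests; the requested speed must be supported by
+ * the port (phba->lmt):
+ *     echo 8 > /sys/class/scsi_host/hostN/lpfc_link_speed          # request 8Gb, issue LIP
+ *     echo "nolip 0" > /sys/class/scsi_host/hostN/lpfc_link_speed  # back to auto, no LIP
+ */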
+
+/*
+# lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
+#       0  = aer disabled or not supported
+#       1  = aer supported and enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+
+/**
+ * lpfc_aer_support_store - Set the adapter for aer support
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing enable or disable aer flag.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the val is 1 and currently the device's AER capability was not
+ * enabled, invoke the kernel's enable AER helper routine, trying to
+ * enable the device's AER capability. If the helper routine enabling
+ * AER returns success, update the device's cfg_aer_support flag to
+ * indicate AER is supported by the device; otherwise, if the device
+ * AER capability is already enabled to support AER, then do nothing.
+ *
+ * If the val is 0 and currently the device's AER support was enabled,
+ * invoke the kernel's disable AER helper routine. After that, update
+ * the device's cfg_aer_support flag to indicate AER is not supported
+ * by the device; otherwise, if the device AER capability is already
+ * disabled from supporting AER, then do nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	int val = 0, rc = -EINVAL;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+
+	switch (val) {
+	case 0:
+		if (phba->hba_flag & HBA_AER_ENABLED) {
+			rc = pci_disable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag &= ~HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 0;
+				rc = strlen(buf);
+			} else
+				rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 0;
+			rc = strlen(buf);
+		}
+		break;
+	case 1:
+		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
+			rc = pci_enable_pcie_error_reporting(phba->pcidev);
+			if (!rc) {
+				spin_lock_irq(&phba->hbalock);
+				phba->hba_flag |= HBA_AER_ENABLED;
+				spin_unlock_irq(&phba->hbalock);
+				phba->cfg_aer_support = 1;
+				rc = strlen(buf);
+			} else
+				 rc = -EPERM;
+		} else {
+			phba->cfg_aer_support = 1;
+			rc = strlen(buf);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int lpfc_aer_support = 1;
+module_param(lpfc_aer_support, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
+lpfc_param_show(aer_support)
+
+/**
+ * lpfc_aer_support_init - Set the initial adapters aer support flag
+ * @phba: lpfc_hba pointer.
+ * @val: enable aer or disable aer flag.
+ *
+ * Description:
+ * If val is in a valid range [0,1], then set the adapter's initial
+ * cfg_aer_support field. It will be up to the driver's probe_one
+ * routine to determine whether the device's AER support can be set
+ * or not.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, and
+ * choose the default value of setting AER support and return.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_aer_support_init(struct lpfc_hba *phba, int val)
+{
+	if (val == 0 || val == 1) {
+		phba->cfg_aer_support = val;
+		return 0;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2712 lpfc_aer_support attribute value %d out "
+			"of range, allowed values are 0|1, setting it "
+			"to default value of 1\n", val);
+	/* By default, try to enable AER on a device */
+	phba->cfg_aer_support = 1;
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
+		   lpfc_aer_support_show, lpfc_aer_support_store);
+
+/**
+ * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing flag 1 for aer cleanup state.
+ * @count: unused variable.
+ *
+ * Description:
+ * If the @buf contains 1 and the device currently has the AER support
+ * enabled, then invokes the kernel AER helper routine
+ * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * error status register.
+ *
+ * Notes:
+ *
+ * Returns:
+ * -EINVAL if the buf does not contain the 1 or the device is not currently
+ * enabled with the AER support.
+ **/
+static ssize_t
+lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int val, rc = -1;
+
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val != 1)
+		return -EINVAL;
+
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+
+	if (rc == 0)
+		return strlen(buf);
+	else
+		return -EPERM;
+}
+
+static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
+		   lpfc_aer_cleanup_state);
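+
+/*
+ * Illustrative usage (hostN is a placeholder): writing "1" while AER is
+ * enabled clears the device's uncorrectable error status registers:
+ *     echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_state_cleanup
+ */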
+
+/**
+ * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string the number of vfs to be enabled.
+ * @count: unused variable.
+ *
+ * Description:
+ * When this API is called through sysfs, the driver shall try to enable
+ * or disable SR-IOV virtual functions according to the
+ * following:
+ *
+ * If zero virtual functions have been enabled on the physical function,
+ * the driver shall invoke the pci enable virtual function api trying
+ * to enable the virtual functions. If the nr_vfn provided is greater
+ * than the maximum supported, -EINVAL shall be returned and the driver
+ * does nothing; otherwise, the nr_vfn provided shall
+ * be used for invoking the api. If the api call returned success, the
+ * actual number of virtual functions enabled will be set to the driver
+ * cfg_sriov_nr_virtfn; otherwise, -EPERM shall be returned and driver
+ * cfg_sriov_nr_virtfn remains zero.
+ *
+ * If non-zero virtual functions have already been enabled on the
+ * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
+ * -EEXIST will be returned and the driver does nothing;
+ *
+ * If the nr_vfn provided is zero and non-zero virtual functions have
+ * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
+ * disabling virtual function api shall be invoked to disable all the
+ * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set to
+ * zero. Otherwise, if zero virtual functions have been enabled, do
+ * nothing.
+ *
+ * Returns:
+ * length of the buf on success if val is in range and the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct pci_dev *pdev = phba->pcidev;
+	int val = 0, rc = -EINVAL;
+
+	/* Sanity check on user data */
+	if (!isdigit(buf[0]))
+		return -EINVAL;
+	if (sscanf(buf, "%i", &val) != 1)
+		return -EINVAL;
+	if (val < 0)
+		return -EINVAL;
+
+	/* Request disabling virtual functions */
+	if (val == 0) {
+		if (phba->cfg_sriov_nr_virtfn > 0) {
+			pci_disable_sriov(pdev);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+		return strlen(buf);
+	}
+
+	/* Request enabling virtual functions */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3018 There are %d virtual functions "
+				"enabled on physical function.\n",
+				phba->cfg_sriov_nr_virtfn);
+		return -EEXIST;
+	}
+
+	if (val <= LPFC_MAX_VFN_PER_PFN)
+		phba->cfg_sriov_nr_virtfn = val;
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3019 Enabling %d virtual functions is not "
+				"allowed.\n", val);
+		return -EINVAL;
+	}
+
+	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
+	if (rc) {
+		phba->cfg_sriov_nr_virtfn = 0;
+		rc = -EPERM;
+	} else
+		rc = strlen(buf);
+
+	return rc;
+}
+
+static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
+module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+lpfc_param_show(sriov_nr_virtfn)
+
+/**
+ * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: number of virtual functions to enable.
+ *
+ * Description:
+ * If val is in a valid range [0,255], then set the adapter's initial
+ * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
+ * number shall be used instead. It will be up to the driver's probe_one
+ * routine to determine whether the device's SR-IOV is supported or not.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
+{
+	if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
+		phba->cfg_sriov_nr_virtfn = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3017 Enabling %d virtual functions is not "
+			"allowed.\n", val);
+	return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
+		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
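+
+/*
+ * Illustrative usage (hostN is a placeholder): request two SR-IOV virtual
+ * functions on the physical function, or disable them again with 0:
+ *     echo 2 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn
+ *     echo 0 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn
+ */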
+
+/*
+# lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
+# Value range is [2,3]. Default value is 3.
+*/
+LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
+		  "Select Fibre Channel class of service for FCP sequences");
+
+/*
+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
+# is [0,1]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
+		   "Use ADISC on rediscovery to authenticate FCP devices");
+
+/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+static int lpfc_max_scsicmpl_time;
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+	"Use command completion time to control queue depth");
+lpfc_vport_param_show(max_scsicmpl_time);
+lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+
+	if (val == vport->cfg_max_scsicmpl_time)
+		return 0;
+	if ((val < 0) || (val > 60000))
+		return -EINVAL;
+	vport->cfg_max_scsicmpl_time = val;
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	}
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+		   lpfc_max_scsicmpl_time_show,
+		   lpfc_max_scsicmpl_time_store);
+
+/*
+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
+# range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
+
+/*
+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
+# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
+# cr_delay is set to 0.
+*/
+LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
+		"interrupt response is generated");
+
+LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
+		"interrupt response is generated");
+
+/*
+# lpfc_multi_ring_support:  Determines how many rings to spread available
+# cmd/rsp IOCB entries across.
+# Value range is [1,2]. Default value is 1.
+*/
+LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
+		"SLI rings to spread IOCB entries across");
+
+/*
+# lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
+	     255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
+	     255, "Identifies TYPE for additional ring configuration");
+
+/*
+# lpfc_fdmi_on: controls FDMI support.
+#       0 = no FDMI support
+#       1 = support FDMI without attribute of hostname
+#       2 = support FDMI with attribute of hostname
+# Value range [0,2]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
+
+/*
+# Specifies the maximum number of ELS cmds we can have outstanding (for
+# discovery). Value range is [1,64]. Default value = 32.
+*/
+LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
+		 "during discovery");
+
+/*
+# lpfc_max_luns: maximum allowed LUN.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
+*/
+LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN");
+
+/*
+# lpfc_poll_tmo: Milliseconds driver will wait between polling the FCP ring.
+# Value range is [1,255], default value is 10.
+*/
+LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
+	     "Milliseconds driver will wait between polling FCP ring");
+
+/*
+# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
+#		support this feature
+#       0  = MSI disabled
+#       1  = MSI enabled
+#       2  = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
+*/
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+	    "MSI-X (2), if possible");
+
+/*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+	    "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+	    "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+	    "Set the number of fast-path FCP event queues, if possible");
+
+/*
+# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
+#       0  = HBA resets disabled
+#       1  = HBA resets enabled (default)
+# Value range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+
+/*
+# lpfc_enable_hba_heartbeat: Enable the HBA heartbeat timer.
+#       0  = HBA Heartbeat disabled (default)
+#       1  = HBA Heartbeat enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
+
+/*
+# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+#       0  = BlockGuard disabled (default)
+#       1  = BlockGuard enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+
+/*
+# lpfc_prot_mask:
+#	- Bit mask of host protection capabilities used to register with the
+#	  SCSI mid-layer
+# 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all profiles.
+#
+*/
+unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
+			      SHOST_DIX_TYPE0_PROTECTION |
+			      SHOST_DIX_TYPE1_PROTECTION;
+
+module_param(lpfc_prot_mask, uint, S_IRUGO);
+MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+
+/*
+# lpfc_prot_guard:
+#	- Bit mask of protection guard types to register with the SCSI mid-layer
+# 	- Guard types are currently either 1) IP checksum 2) T10-DIF CRC
+#	- Allows you to ultimately specify which profiles to use
+#	- Default will result in registering capabilities for all guard types
+#
+*/
+unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
+module_param(lpfc_prot_guard, byte, S_IRUGO);
+MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
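+
+/*
+ * Illustrative module load (the numeric value assumes the SHOST_DIF_/SHOST_DIX_
+ * bit definitions from scsi/scsi_host.h, where the default mask above is
+ * DIF_TYPE1 | DIX_TYPE0 | DIX_TYPE1); the mask only matters with BlockGuard on:
+ *     modprobe lpfc lpfc_enable_bg=1 lpfc_prot_mask=0x19
+ */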
+
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial NPort discovery is
+ * delayed by ra_tov seconds when the Clean Address bit is cleared in the
+ * FLOGI/FDISC accept and the FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays NPort discovery for subsequent FLOGI/FDISC
+ * completions when the Clean Address bit is cleared in the FLOGI/FDISC
+ * accept and the FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+	"Delay NPort discovery when Clean Address bit is cleared. "
+	"Allowed values: 0,1.");
+
+/*
+ * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
+ * This value can be set to values between 64 and 256. The default value is
+ * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
+ * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ */
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
+		LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
+		"Max Protection Scatter Gather Segment Count");
+
+struct device_attribute *lpfc_hba_attrs[] = {
+	&dev_attr_bg_info,
+	&dev_attr_bg_guard_err,
+	&dev_attr_bg_apptag_err,
+	&dev_attr_bg_reftag_err,
+	&dev_attr_info,
+	&dev_attr_serialnum,
+	&dev_attr_modeldesc,
+	&dev_attr_modelname,
+	&dev_attr_programtype,
+	&dev_attr_portnum,
+	&dev_attr_fwrev,
+	&dev_attr_hdw,
+	&dev_attr_option_rom_version,
+	&dev_attr_link_state,
+	&dev_attr_num_discovered_ports,
+	&dev_attr_menlo_mgmt_mode,
+	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_enable_fip,
+	&dev_attr_lpfc_temp_sensor,
+	&dev_attr_lpfc_log_verbose,
+	&dev_attr_lpfc_lun_queue_depth,
+	&dev_attr_lpfc_tgt_queue_depth,
+	&dev_attr_lpfc_hba_queue_depth,
+	&dev_attr_lpfc_peer_port_login,
+	&dev_attr_lpfc_nodev_tmo,
+	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_fcp_class,
+	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_ack0,
+	&dev_attr_lpfc_topology,
+	&dev_attr_lpfc_scan_down,
+	&dev_attr_lpfc_link_speed,
+	&dev_attr_lpfc_cr_delay,
+	&dev_attr_lpfc_cr_count,
+	&dev_attr_lpfc_multi_ring_support,
+	&dev_attr_lpfc_multi_ring_rctl,
+	&dev_attr_lpfc_multi_ring_type,
+	&dev_attr_lpfc_fdmi_on,
+	&dev_attr_lpfc_max_luns,
+	&dev_attr_lpfc_enable_npiv,
+	&dev_attr_lpfc_fcf_failover_policy,
+	&dev_attr_lpfc_enable_rrq,
+	&dev_attr_nport_evt_cnt,
+	&dev_attr_board_mode,
+	&dev_attr_max_vpi,
+	&dev_attr_used_vpi,
+	&dev_attr_max_rpi,
+	&dev_attr_used_rpi,
+	&dev_attr_max_xri,
+	&dev_attr_used_xri,
+	&dev_attr_npiv_info,
+	&dev_attr_issue_reset,
+	&dev_attr_lpfc_poll,
+	&dev_attr_lpfc_poll_tmo,
+	&dev_attr_lpfc_use_msi,
+	&dev_attr_lpfc_fcp_imax,
+	&dev_attr_lpfc_fcp_wq_count,
+	&dev_attr_lpfc_fcp_eq_count,
+	&dev_attr_lpfc_enable_bg,
+	&dev_attr_lpfc_soft_wwnn,
+	&dev_attr_lpfc_soft_wwpn,
+	&dev_attr_lpfc_soft_wwn_enable,
+	&dev_attr_lpfc_enable_hba_reset,
+	&dev_attr_lpfc_enable_hba_heartbeat,
+	&dev_attr_lpfc_sg_seg_cnt,
+	&dev_attr_lpfc_max_scsicmpl_time,
+	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_prot_sg_seg_cnt,
+	&dev_attr_lpfc_aer_support,
+	&dev_attr_lpfc_aer_state_cleanup,
+	&dev_attr_lpfc_sriov_nr_virtfn,
+	&dev_attr_lpfc_suppress_link_up,
+	&dev_attr_lpfc_iocb_cnt,
+	&dev_attr_iocb_hw,
+	&dev_attr_txq_hw,
+	&dev_attr_txcmplq_hw,
+	&dev_attr_lpfc_fips_level,
+	&dev_attr_lpfc_fips_rev,
+	&dev_attr_lpfc_dss,
+	&dev_attr_lpfc_sriov_hw_max_virtfn,
+	&dev_attr_protocol,
+	NULL,
+};
+
+struct device_attribute *lpfc_vport_attrs[] = {
+	&dev_attr_info,
+	&dev_attr_link_state,
+	&dev_attr_num_discovered_ports,
+	&dev_attr_lpfc_drvr_version,
+	&dev_attr_lpfc_log_verbose,
+	&dev_attr_lpfc_lun_queue_depth,
+	&dev_attr_lpfc_tgt_queue_depth,
+	&dev_attr_lpfc_nodev_tmo,
+	&dev_attr_lpfc_devloss_tmo,
+	&dev_attr_lpfc_hba_queue_depth,
+	&dev_attr_lpfc_peer_port_login,
+	&dev_attr_lpfc_restrict_login,
+	&dev_attr_lpfc_fcp_class,
+	&dev_attr_lpfc_use_adisc,
+	&dev_attr_lpfc_fdmi_on,
+	&dev_attr_lpfc_max_luns,
+	&dev_attr_nport_evt_cnt,
+	&dev_attr_npiv_info,
+	&dev_attr_lpfc_enable_da_id,
+	&dev_attr_lpfc_max_scsicmpl_time,
+	&dev_attr_lpfc_stat_data_ctrl,
+	&dev_attr_lpfc_static_vport,
+	&dev_attr_lpfc_fips_level,
+	&dev_attr_lpfc_fips_rev,
+	NULL,
+};
+
+/**
+ * sysfs_ctlreg_write - Write method for writing to ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * -EPERM adapter is offline
+ * value of count, buf contents written
+ **/
+static ssize_t
+sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
+		   struct bin_attribute *bin_attr,
+		   char *buf, loff_t off, size_t count)
+{
+	size_t buf_off;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
+	if ((off + count) > FF_REG_AREA_SIZE)
+		return -ERANGE;
+
+	if (count <= LPFC_REG_WRITE_KEY_SIZE)
+		return 0;
+
+	if (off % 4 || count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	/* This is to protect HBA registers from accidental writes. */
+	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
+		return -EINVAL;
+
+	if (!(vport->fc_flag & FC_OFFLINE_MODE))
+		return -EPERM;
+
+	spin_lock_irq(&phba->hbalock);
+	for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
+			buf_off += sizeof(uint32_t))
+		writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
+		       phba->ctrl_regs_memmap_p + off + buf_off);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return count;
+}
+
+/**
+ * sysfs_ctlreg_read - Read method for reading from ctlreg
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: if successful contains the data from the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to read data into buf.
+ *
+ * Returns:
+ * -ERANGE off and count combo out of range
+ * -EINVAL off, count or buff address invalid
+ * value of count, buf contents read
+ **/
+static ssize_t
+sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
+		  struct bin_attribute *bin_attr,
+		  char *buf, loff_t off, size_t count)
+{
+	size_t buf_off;
+	uint32_t * tmp_ptr;
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host  *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (phba->sli_rev >= LPFC_SLI_REV4)
+		return -EPERM;
+
+	if (off > FF_REG_AREA_SIZE)
+		return -ERANGE;
+
+	if ((off + count) > FF_REG_AREA_SIZE)
+		count = FF_REG_AREA_SIZE - off;
+
+	if (count == 0) return 0;
+
+	if (off % 4 || count % 4 || (unsigned long)buf % 4)
+		return -EINVAL;
+
+	spin_lock_irq(&phba->hbalock);
+
+	for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
+		tmp_ptr = (uint32_t *)(buf + buf_off);
+		*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+
+	return count;
+}
+
+static struct bin_attribute sysfs_ctlreg_attr = {
+	.attr = {
+		.name = "ctlreg",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = 256,
+	.read = sysfs_ctlreg_read,
+	.write = sysfs_ctlreg_write,
+};
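+
+/*
+ * Note on ctlreg writes (see sysfs_ctlreg_write() above): SLI-4 adapters
+ * reject this interface, the adapter must be offline, the offset and length
+ * must be 4-byte aligned, and the data written must be prefixed with the
+ * LPFC_REG_WRITE_KEY bytes; only the payload after the key is copied to the
+ * control register area at the requested offset.
+ */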
+
+/**
+ * sysfs_mbox_write - Write method for writing information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_write(struct file *filp, struct kobject *kobj,
+		 struct bin_attribute *bin_attr,
+		 char *buf, loff_t off, size_t count)
+{
+	return -EPERM;
+}
+
+/**
+ * sysfs_mbox_read - Read method for reading information via mbox
+ * @filp: open sysfs file
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be read from sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
+ *
+ * Returns:
+ * -EPERM operation not permitted
+ **/
+static ssize_t
+sysfs_mbox_read(struct file *filp, struct kobject *kobj,
+		struct bin_attribute *bin_attr,
+		char *buf, loff_t off, size_t count)
+{
+	return -EPERM;
+}
+
+static struct bin_attribute sysfs_mbox_attr = {
+	.attr = {
+		.name = "mbox",
+		.mode = S_IRUSR | S_IWUSR,
+	},
+	.size = MAILBOX_SYSFS_MAX,
+	.read = sysfs_mbox_read,
+	.write = sysfs_mbox_write,
+};
+
+/**
+ * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ *
+ * Return codes:
+ * zero on success
+ * error return code from sysfs_create_bin_file()
+ **/
+int
+lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	int error;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_drvr_stat_data_attr);
+
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (error || vport->port_type == LPFC_NPIV_PORT)
+		goto out;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_ctlreg_attr);
+	if (error)
+		goto out_remove_stat_attr;
+
+	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+				      &sysfs_mbox_attr);
+	if (error)
+		goto out_remove_ctlreg_attr;
+
+	return 0;
+out_remove_ctlreg_attr:
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+out_remove_stat_attr:
+	sysfs_remove_bin_file(&shost->shost_dev.kobj,
+			&sysfs_drvr_stat_data_attr);
+out:
+	return error;
+}
+
+/**
+ * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
+ * @vport: address of lpfc vport structure.
+ **/
+void
+lpfc_free_sysfs_attr(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	sysfs_remove_bin_file(&shost->shost_dev.kobj,
+		&sysfs_drvr_stat_data_attr);
+	/* Virtual ports do not need ctrl_reg and mbox */
+	if (vport->port_type == LPFC_NPIV_PORT)
+		return;
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
+	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+}
+
+
+/*
+ * Dynamic FC Host Attributes Support
+ */
+
+/**
+ * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+	/* note: fc_myDID already in cpu endianness */
+	fc_host_port_id(shost) = vport->fc_myDID;
+}
+
+/**
+ * lpfc_get_host_port_type - Set the value of the scsi host port type
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_type(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->port_type == LPFC_NPIV_PORT) {
+		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+	} else if (lpfc_is_link_up(phba)) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			if (vport->fc_flag & FC_PUBLIC_LOOP)
+				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+			else
+				fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
+		} else {
+			if (vport->fc_flag & FC_FABRIC)
+				fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+			else
+				fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+		}
+	} else
+		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_port_state - Set the value of the scsi host port state
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_port_state(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+	else {
+		switch (phba->link_state) {
+		case LPFC_LINK_UNKNOWN:
+		case LPFC_LINK_DOWN:
+			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+			break;
+		case LPFC_LINK_UP:
+		case LPFC_CLEAR_LA:
+		case LPFC_HBA_READY:
+			/* Link is up, report port state accordingly */
+			if (vport->port_state < LPFC_VPORT_READY)
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_BYPASSED;
+			else
+				fc_host_port_state(shost) =
+							FC_PORTSTATE_ONLINE;
+			break;
+		case LPFC_HBA_ERROR:
+			fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
+			break;
+		default:
+			fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+			break;
+		}
+	}
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_speed - Set the value of the scsi host speed
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_speed(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (lpfc_is_link_up(phba)) {
+		switch (phba->fc_linkspeed) {
+		case LPFC_LINK_SPEED_1GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+			break;
+		case LPFC_LINK_SPEED_2GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+			break;
+		case LPFC_LINK_SPEED_4GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+			break;
+		case LPFC_LINK_SPEED_8GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+			break;
+		case LPFC_LINK_SPEED_10GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+			break;
+		case LPFC_LINK_SPEED_16GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+			break;
+		default:
+			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+			break;
+		}
+	} else
+		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_fabric_name(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	u64 node_name;
+
+	spin_lock_irq(shost->host_lock);
+
+	if ((vport->port_state > LPFC_FLOGI) &&
+	    ((vport->fc_flag & FC_FABRIC) ||
+	     ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+	      (vport->fc_flag & FC_PUBLIC_LOOP))))
+		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
+	else
+		/* fabric is local port if there is no F/FL_Port */
+		node_name = 0;
+
+	spin_unlock_irq(shost->host_lock);
+
+	fc_host_fabric_name(shost) = node_name;
+}
+
+/**
+ * lpfc_get_stats - Return statistical information about the adapter
+ * @shost: kernel scsi host pointer.
+ *
+ * Notes:
+ * Returns NULL on error: link down, no mailbox memory pool, SLI not active,
+ * management I/O blocked, memory allocation failure, or mailbox command error.
+ *
+ * Returns:
+ * NULL for error
+ * address of the adapter host statistics
+ **/
+static struct fc_host_statistics *
+lpfc_get_stats(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_sli   *psli = &phba->sli;
+	struct fc_host_statistics *hs = &phba->link_stats;
+	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	unsigned long seconds;
+	int rc = 0;
+
+	/*
+	 * prevent udev from issuing mailbox commands until the port is
+	 * configured.
+	 */
+	if (phba->link_state < LPFC_LINK_DOWN ||
+	    !phba->mbox_mem_pool ||
+	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
+		return NULL;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return NULL;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return NULL;
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_STATUS;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return NULL;
+	}
+
+	memset(hs, 0, sizeof (struct fc_host_statistics));
+
+	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+	/*
+	 * MBX_READ_STATUS returns the transmit and receive byte counts in
+	 * kilobytes, which have to be converted to words (1 KB = 256 words).
+	 */
+	hs->tx_words = (uint64_t)
+			((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+			* (uint64_t)256);
+	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+	hs->rx_words = (uint64_t)
+			((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+			 * (uint64_t)256);
+
+	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+	pmb->mbxCommand = MBX_READ_LNK_STAT;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return NULL;
+	}
+
+	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+	hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+	hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+	hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+	hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+	hs->error_frames = pmb->un.varRdLnk.crcCnt;
+
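+	/* Report the counters relative to the last statistics reset by
+	 * subtracting the offsets snapshotted in lpfc_reset_stats().
+	 */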
+	hs->link_failure_count -= lso->link_failure_count;
+	hs->loss_of_sync_count -= lso->loss_of_sync_count;
+	hs->loss_of_signal_count -= lso->loss_of_signal_count;
+	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+	hs->invalid_crc_count -= lso->invalid_crc_count;
+	hs->error_frames -= lso->error_frames;
+
+	if (phba->hba_flag & HBA_FCOE_MODE) {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->link_events >> 1);
+		hs->nos_count -= lso->link_events;
+	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		hs->lip_count = (phba->fc_eventTag >> 1);
+		hs->lip_count -= lso->link_events;
+		hs->nos_count = -1;
+	} else {
+		hs->lip_count = -1;
+		hs->nos_count = (phba->fc_eventTag >> 1);
+		hs->nos_count -= lso->link_events;
+	}
+
+	hs->dumped_frames = -1;
+
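+	/* Compute the time since the last reset, allowing for get_seconds()
+	 * wrapping past the recorded start time.
+	 */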
+	seconds = get_seconds();
+	if (seconds < psli->stats_start)
+		hs->seconds_since_last_reset = seconds +
+				((unsigned long)-1 - psli->stats_start);
+	else
+		hs->seconds_since_last_reset = seconds - psli->stats_start;
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return hs;
+}
+
+/**
+ * lpfc_reset_stats - Reset adapter statistics and record the link stat offsets
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *pmb;
+	int rc = 0;
+
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+		return;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return;
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	pmb->mbxCommand = MBX_READ_STATUS;
+	pmb->mbxOwner = OWN_HOST;
+	pmb->un.varWords[0] = 0x1; /* reset request */
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+		(!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
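+	/* Snapshot the current link error counters into lnk_stat_offsets so
+	 * that lpfc_get_stats() can report values relative to this reset.
+	 */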
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmb->mbxCommand = MBX_READ_LNK_STAT;
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->context1 = NULL;
+	pmboxq->vport = vport;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+	if (rc != MBX_SUCCESS) {
+		if (rc != MBX_TIMEOUT)
+			mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+	lso->error_frames = pmb->un.varRdLnk.crcCnt;
+	if (phba->hba_flag & HBA_FCOE_MODE)
+		lso->link_events = (phba->link_events >> 1);
+	else
+		lso->link_events = (phba->fc_eventTag >> 1);
+
+	psli->stats_start = get_seconds();
+
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return;
+}
+
+/*
+ * The LPFC driver treats linkdown handling as target loss events so there
+ * are no sysfs handlers for link_down_tmo.
+ */
+
+/**
+ * lpfc_get_node_by_target - Return the nodelist for a target
+ * @starget: kernel scsi target pointer.
+ *
+ * Returns:
+ * address of the node list if found
+ * NULL target not found
+ **/
+static struct lpfc_nodelist *
+lpfc_get_node_by_target(struct scsi_target *starget)
+{
+	struct Scsi_Host  *shost = dev_to_shost(starget->dev.parent);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	/* Search for this, mapped, target ID */
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (NLP_CHK_NODE_ACT(ndlp) &&
+		    ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+		    starget->id == ndlp->nlp_sid) {
+			spin_unlock_irq(shost->host_lock);
+			return ndlp;
+		}
+	}
+	spin_unlock_irq(shost->host_lock);
+	return NULL;
+}
+
+/**
+ * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
+ * @starget: kernel scsi target pointer.
+ **/
+static void
+lpfc_get_starget_port_id(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
+}
+
+/**
+ * lpfc_get_starget_node_name - Set the target node name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target node name to the ndlp node name wwn or zero.
+ **/
+static void
+lpfc_get_starget_node_name(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_node_name(starget) =
+		ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
+}
+
+/**
+ * lpfc_get_starget_port_name - Set the target port name
+ * @starget: kernel scsi target pointer.
+ *
+ * Description:  set the target port name to the ndlp port name wwn or zero.
+ **/
+static void
+lpfc_get_starget_port_name(struct scsi_target *starget)
+{
+	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
+
+	fc_starget_port_name(starget) =
+		ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
+}
+
+/**
+ * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
+ * @rport: fc rport address.
+ * @timeout: new value for dev loss tmo.
+ *
+ * Description:
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ **/
+static void
+lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout;
+	else
+		rport->dev_loss_tmo = 1;
+}
+
+/**
+ * lpfc_rport_show_function - Return rport target information
+ *
+ * Description:
+ * Macro that uses field to generate a function with the name lpfc_show_rport_
+ *
+ * lpfc_show_rport_##field: returns the bytes formatted in buf
+ * @cdev: class converted to an fc_rport.
+ * @buf: on return contains the target_field or zero.
+ *
+ * Returns: size of formatted string.
+ **/
+#define lpfc_rport_show_function(field, format_string, sz, cast)	\
+static ssize_t								\
+lpfc_show_rport_##field (struct device *dev,				\
+			 struct device_attribute *attr,			\
+			 char *buf)					\
+{									\
+	struct fc_rport *rport = transport_class_to_rport(dev);		\
+	struct lpfc_rport_data *rdata = rport->hostdata;		\
+	return snprintf(buf, sz, format_string,				\
+		(rdata->target) ? cast rdata->target->field : 0);	\
+}
+
+#define lpfc_rport_rd_attr(field, format_string, sz)			\
+	lpfc_rport_show_function(field, format_string, sz, )		\
+static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
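+/*
+ * Illustrative only (the field name below is hypothetical): an invocation
+ * such as
+ *
+ *   lpfc_rport_rd_attr(some_field, "%d\n", 20)
+ *
+ * expands to a lpfc_show_rport_some_field() show routine plus a read-only
+ * FC_RPORT_ATTR named "some_field".
+ */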
+
+/**
+ * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
+ * @fc_vport: The fc_vport whose symbolic name has been changed.
+ *
+ * Description:
+ * This function is called by the transport after the @fc_vport's symbolic name
+ * has been changed. This function re-registers the symbolic name with the
+ * switch to propagate the change into the fabric if the vport is active.
+ **/
+static void
+lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+
+	if (vport->port_state == LPFC_VPORT_READY)
+		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+}
+
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose level taken from the module's lpfc_log_verbose parameter.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to copy the
+ * module-level lpfc_log_verbose setting into @phba's cfg_log_verbose so that
+ * log messages can be filtered before any hba port or vport is created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+	phba->cfg_log_verbose = verbose;
+}
+
+struct fc_function_template lpfc_transport_functions = {
+	/* fixed attributes the driver supports */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_symbolic_name = 1,
+
+	/* dynamic attributes the driver supports */
+	.get_host_port_id = lpfc_get_host_port_id,
+	.show_host_port_id = 1,
+
+	.get_host_port_type = lpfc_get_host_port_type,
+	.show_host_port_type = 1,
+
+	.get_host_port_state = lpfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	/* active_fc4s is shown but doesn't change (thus no get function) */
+	.show_host_active_fc4s = 1,
+
+	.get_host_speed = lpfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.get_host_fabric_name = lpfc_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+
+	/*
+	 * The LPFC driver treats linkdown handling as target loss events
+	 * so there are no sysfs handlers for link_down_tmo.
+	 */
+
+	.get_fc_host_stats = lpfc_get_stats,
+	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_port_id  = lpfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.get_starget_node_name = lpfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = lpfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.issue_fc_host_lip = lpfc_issue_lip,
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
+
+	.dd_fcvport_size = sizeof(struct lpfc_vport *),
+
+	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+
+	.bsg_request = lpfc_bsg_request,
+	.bsg_timeout = lpfc_bsg_timeout,
+};
+
+struct fc_function_template lpfc_vport_transport_functions = {
+	/* fixed attributes the driver supports */
+	.show_host_node_name = 1,
+	.show_host_port_name = 1,
+	.show_host_supported_classes = 1,
+	.show_host_supported_fc4s = 1,
+	.show_host_supported_speeds = 1,
+	.show_host_maxframe_size = 1,
+	.show_host_symbolic_name = 1,
+
+	/* dynamic attributes the driver supports */
+	.get_host_port_id = lpfc_get_host_port_id,
+	.show_host_port_id = 1,
+
+	.get_host_port_type = lpfc_get_host_port_type,
+	.show_host_port_type = 1,
+
+	.get_host_port_state = lpfc_get_host_port_state,
+	.show_host_port_state = 1,
+
+	/* active_fc4s is shown but doesn't change (thus no get function) */
+	.show_host_active_fc4s = 1,
+
+	.get_host_speed = lpfc_get_host_speed,
+	.show_host_speed = 1,
+
+	.get_host_fabric_name = lpfc_get_host_fabric_name,
+	.show_host_fabric_name = 1,
+
+	/*
+	 * The LPFC driver treats linkdown handling as target loss events
+	 * so there are no sysfs handlers for link_down_tmo.
+	 */
+
+	.get_fc_host_stats = lpfc_get_stats,
+	.reset_fc_host_stats = lpfc_reset_stats,
+
+	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
+	.show_rport_maxframe_size = 1,
+	.show_rport_supported_classes = 1,
+
+	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+	.show_rport_dev_loss_tmo = 1,
+
+	.get_starget_port_id  = lpfc_get_starget_port_id,
+	.show_starget_port_id = 1,
+
+	.get_starget_node_name = lpfc_get_starget_node_name,
+	.show_starget_node_name = 1,
+
+	.get_starget_port_name = lpfc_get_starget_port_name,
+	.show_starget_port_name = 1,
+
+	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+	.terminate_rport_io = lpfc_terminate_rport_io,
+
+	.vport_disable = lpfc_vport_disable,
+
+	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+};
+
+/**
+ * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
+ * @phba: lpfc_hba pointer.
+ **/
+void
+lpfc_get_cfgparam(struct lpfc_hba *phba)
+{
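+	/* Seed each adapter-level configuration field from the corresponding
+	 * module parameter via its *_init helper.
+	 */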
+	lpfc_cr_delay_init(phba, lpfc_cr_delay);
+	lpfc_cr_count_init(phba, lpfc_cr_count);
+	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
+	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
+	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
+	lpfc_ack0_init(phba, lpfc_ack0);
+	lpfc_topology_init(phba, lpfc_topology);
+	lpfc_link_speed_init(phba, lpfc_link_speed);
+	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
+	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+	lpfc_use_msi_init(phba, lpfc_use_msi);
+	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
+	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
+	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+	lpfc_enable_bg_init(phba, lpfc_enable_bg);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->cfg_poll = 0;
+	else
+		phba->cfg_poll = lpfc_poll;
+	phba->cfg_soft_wwnn = 0L;
+	phba->cfg_soft_wwpn = 0L;
+	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
+	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
+	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+	lpfc_aer_support_init(phba, lpfc_aer_support);
+	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
+	lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+	phba->cfg_enable_dss = 1;
+	return;
+}
+
+/**
+ * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
+ * @vport: lpfc_vport pointer.
+ **/
+void
+lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
+{
+	lpfc_log_verbose_init(vport, lpfc_log_verbose);
+	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
+	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
+	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
+	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
+	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
+	lpfc_restrict_login_init(vport, lpfc_restrict_login);
+	lpfc_fcp_class_init(vport, lpfc_fcp_class);
+	lpfc_use_adisc_init(vport, lpfc_use_adisc);
+	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
+	lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
+	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
+	lpfc_max_luns_init(vport, lpfc_max_luns);
+	lpfc_scan_down_init(vport, lpfc_scan_down);
+	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 0000000..141e4b4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,5195 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/mempool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_bsg.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+struct lpfc_bsg_event {
+	struct list_head node;
+	struct kref kref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+
+	/* job waiting for this event to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_iocb {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_mbox {
+	LPFC_MBOXQ_t *pmboxq;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
+	uint8_t *ext; /* extended mailbox data */
+	uint32_t mbOffset; /* from app */
+	uint32_t inExtWLen; /* from app */
+	uint32_t outExtWLen; /* from app */
+
+	/* job waiting for this mbox command to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
+#define TYPE_EVT	1
+#define TYPE_IOCB	2
+#define TYPE_MBOX	3
+#define TYPE_MENLO	4
+struct bsg_job_data {
+	uint32_t type;
+	union {
+		struct lpfc_bsg_event *evt;
+		struct lpfc_bsg_iocb iocb;
+		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
+	} context_un;
+};
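+/* The TYPE_* value stored in bsg_job_data.type records which context_un
+ * member is valid for a given in-flight bsg job.
+ */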
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+#define ELX_LOOPBACK_HEADER_SZ \
+	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
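+/* Size of a CT request up to (but not including) the un payload member,
+ * i.e. an open-coded offsetof(struct lpfc_sli_ct_request, un).
+ */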
+
+struct lpfc_dmabufext {
+	struct lpfc_dmabuf dma;
+	uint32_t size;
+	uint32_t flag;
+};
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function can also be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_bsg_iocb *iocb;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context2;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+		return;
+	}
+
+	iocb = &dd_data->context_un.iocb;
+	job = iocb->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	bmp = iocb->bmp;
+	rsp = &rspiocbq->iocb;
+	ndlp = cmdiocbq->context1;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct ulp_bde64 *bpl = NULL;
+	uint32_t timeout;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	IOCB_t *cmd;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
+	int rc = 0;
+	int iocb_stat;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2733 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
+		goto no_ndlp;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_ndlp;
+	}
+
+	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
+		rc = -ENODEV;
+		goto free_bmp;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
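+	/* Map the request and reply scatterlists and describe both in a single
+	 * buffer pointer list: request BDEs first, then reply (input) BDEs.
+	 */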
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
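+	/* Build a GEN_REQUEST64_CR iocb that carries the CT payload described
+	 * by the buffer pointer list above, addressed to the rport's RPI and
+	 * with a timeout of twice fc_ratov.
+	 */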
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	cmd->ulpOwner = OWN_CHIP;
+	cmdiocbq->vport = phba->pport;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	timeout = phba->fc_ratov * 2;
+	cmd->ulpTimeout = timeout;
+
+	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+	cmdiocbq->context1 = ndlp;
+	cmdiocbq->context2 = dd_data;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto free_cmdiocbq;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+	if (iocb_stat == IOCB_SUCCESS)
+		return 0; /* done for now */
+	else if (iocb_stat == IOCB_BUSY)
+		rc = -EAGAIN;
+	else
+		rc = -EIO;
+
+
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+	kfree(bmp);
+free_ndlp:
+	lpfc_nlp_put(ndlp);
+no_ndlp:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_bsg_rport_els function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function can also be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *pbuflist = NULL;
+	struct fc_bsg_ctels_reply *els_reply;
+	uint8_t *rjt_data;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	/* normal completion and timeout crossed paths, already done */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+
+	job = dd_data->context_un.iocb.set_job;
+	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
+	rspiocbq = dd_data->context_un.iocb.rspiocbq;
+	rsp = &rspiocbq->iocb;
+	ndlp = dd_data->context_un.iocb.ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	if (job->reply->result == -EAGAIN)
+		rc = -EAGAIN;
+	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
+		job->reply->reply_payload_rcv_len =
+			rsp->un.elsreq64.bdl.bdeSize;
+	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+		job->reply->reply_payload_rcv_len =
+			sizeof(struct fc_bsg_ctels_reply);
+		/* LS_RJT data returned in word 4 */
+		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+		els_reply = &job->reply->reply_data.ctels_reply;
+		els_reply->status = FC_CTELS_STATUS_REJECT;
+		els_reply->rjt_data.action = rjt_data[3];
+		els_reply->rjt_data.reason_code = rjt_data[2];
+		els_reply->rjt_data.reason_explanation = rjt_data[1];
+		els_reply->rjt_data.vendor_unique = rjt_data[0];
+	} else
+		rc = -EIO;
+
+	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_bsg_rport_els - send an ELS command from a bsg request
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_rport_els(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+	uint32_t elscmd;
+	uint32_t cmdsize;
+	uint32_t rspsize;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *rsp;
+	uint16_t rpi = 0;
+	struct lpfc_dmabuf *pcmd;
+	struct lpfc_dmabuf *prsp;
+	struct lpfc_dmabuf *pbuflist = NULL;
+	struct ulp_bde64 *bpl;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2735 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
+		goto free_dd_data;
+	}
+
+	elscmd = job->request->rqst_data.r_els.els_code;
+	cmdsize = job->request_payload.payload_len;
+	rspsize = job->reply_payload.payload_len;
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		lpfc_nlp_put(ndlp);
+		rc = -ENOMEM;
+		goto free_dd_data;
+	}
+
+	rsp = &rspiocbq->iocb;
+	rpi = ndlp->nlp_rpi;
+
+	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
+				      ndlp->nlp_DID, elscmd);
+	if (!cmdiocbq) {
+		rc = -EIO;
+		goto free_rspiocbq;
+	}
+
+	/* prep els iocb set context1 to the ndlp, context2 to the command
+	 * dmabuf, context3 holds the data dmabuf
+	 */
+	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
+	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
+	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+	kfree(pcmd);
+	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+	kfree(prsp);
+	cmdiocbq->context2 = NULL;
+
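+	/* The command/response buffers allocated by lpfc_prep_els_iocb() were
+	 * freed above; the ELS payload is taken instead from the bsg request
+	 * and reply scatterlists mapped into the BPL below.
+	 */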
+	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+	bpl = (struct ulp_bde64 *) pbuflist->virt;
+
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
+		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+	else
+		cmdiocbq->iocb.ulpContext = rpi;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->context1 = NULL;
+	cmdiocbq->context2 = NULL;
+
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = rspiocbq;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+	dd_data->context_un.iocb.rspiocbq = rspiocbq;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = NULL;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -EIO;
+			goto linkdown_err;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+	lpfc_nlp_put(ndlp);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+	else if (rc == IOCB_BUSY)
+		rc = -EAGAIN;
+	else
+		rc = -EIO;
+
+linkdown_err:
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+
+free_dd_data:
+	kfree(dd_data);
+
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_event_free - frees an allocated event structure
+ * @kref: Pointer to a kref.
+ *
+ * Called from kref_put. Back cast the kref into an event structure address.
+ * Free any events to get, delete associated nodes, free any events to see,
+ * free any data then free the event itself.
+ **/
+static void
+lpfc_bsg_event_free(struct kref *kref)
+{
+	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+						  kref);
+	struct event_data *ed;
+
+	list_del(&evt->node);
+
+	while (!list_empty(&evt->events_to_get)) {
+		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	while (!list_empty(&evt->events_to_see)) {
+		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	kfree(evt);
+}
+
+/**
+ * lpfc_bsg_event_ref - increments the kref for an event
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
+{
+	kref_get(&evt->kref);
+}
+
+/**
+ * lpfc_bsg_event_unref - Uses kref_put to free an event structure
+ * @evt: Pointer to an event structure.
+ **/
+static inline void
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
+{
+	kref_put(&evt->kref, lpfc_bsg_event_free);
+}
+
+/**
+ * lpfc_bsg_event_new - allocate and initialize a event structure
+ * @ev_mask: Mask of events.
+ * @ev_reg_id: Event reg id.
+ * @ev_req_id: Event request id.
+ **/
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+
+	if (!evt)
+		return NULL;
+
+	INIT_LIST_HEAD(&evt->events_to_get);
+	INIT_LIST_HEAD(&evt->events_to_see);
+	evt->type_mask = ev_mask;
+	evt->req_id = ev_req_id;
+	evt->reg_id = ev_reg_id;
+	evt->wait_time_stamp = jiffies;
+	init_waitqueue_head(&evt->wq);
+	kref_init(&evt->kref);
+	return evt;
+}
+
+/**
+ * diag_cmd_data_free - Frees an lpfc dma buffer extension
+ * @phba: Pointer to HBA context object.
+ * @mlist: Pointer to an lpfc dma buffer extension.
+ **/
+static int
+diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+	struct lpfc_dmabufext *mlast;
+	struct pci_dev *pcidev;
+	struct list_head head, *curr, *next;
+
+	if ((!mlist) || (!lpfc_is_link_up(phba) &&
+		(phba->link_flag & LS_LOOPBACK_MODE))) {
+		return 0;
+	}
+
+	pcidev = phba->pcidev;
+	list_add_tail(&head, &mlist->dma.list);
+
+	list_for_each_safe(curr, next, &head) {
+		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
+		if (mlast->dma.virt)
+			dma_free_coherent(&pcidev->dev,
+					  mlast->size,
+					  mlast->dma.virt,
+					  mlast->dma.phys);
+		kfree(mlast);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to the driver SLI ring object on which the command arrived.
+ * @piocbq: Pointer to the unsolicited receive iocb.
+ *
+ * This function is called when an unsolicited CT command is received.  It
+ * forwards the event to any processes registered to receive CT events.
+ **/
+int
+lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *piocbq)
+{
+	uint32_t evt_req_id = 0;
+	uint32_t cmd;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf = NULL;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evt_dat = NULL;
+	struct lpfc_iocbq *iocbq;
+	size_t offset = 0;
+	struct list_head head;
+	struct ulp_bde64 *bde;
+	dma_addr_t dma_addr;
+	int i;
+	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+	struct lpfc_hbq_entry *hbqe;
+	struct lpfc_sli_ct_request *ct_req;
+	struct fc_bsg_job *job = NULL;
+	unsigned long flags;
+	int size = 0;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &piocbq->list);
+
+	if (piocbq->iocb.ulpBdeCount == 0 ||
+	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
+		goto error_ct_unsol_exit;
+
+	if (phba->link_state == LPFC_HBA_ERROR ||
+		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+		goto error_ct_unsol_exit;
+
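+	/* With HBQs enabled the unsolicited buffer is handed to us in the
+	 * iocb context; otherwise it has to be looked up by DMA address in
+	 * the ring's posted-buffer list.
+	 */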
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+		dmabuf = bdeBuf1;
+	else {
+		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
+				    piocbq->iocb.un.cont64[0].addrLow);
+		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
+	}
+	if (dmabuf == NULL)
+		goto error_ct_unsol_exit;
+	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+	evt_req_id = ct_req->FsType;
+	cmd = ct_req->CommandResponse.bits.CmdRsp;
+	len = ct_req->CommandResponse.bits.Size;
+	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+			evt->req_id != evt_req_id)
+			continue;
+
+		lpfc_bsg_event_ref(evt);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
+		if (evt_dat == NULL) {
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2614 Memory allocation failed for "
+					"CT event\n");
+			break;
+		}
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			/* take accumulated byte count from the last iocbq */
+			iocbq = list_entry(head.prev, typeof(*iocbq), list);
+			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+		} else {
+			list_for_each_entry(iocbq, &head, list) {
+				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+					evt_dat->len +=
+					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+			}
+		}
+
+		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
+		if (evt_dat->data == NULL) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2615 Memory allocation failed for "
+					"CT event data, size %d\n",
+					evt_dat->len);
+			kfree(evt_dat);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+			lpfc_bsg_event_unref(evt);
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			goto error_ct_unsol_exit;
+		}
+
+		list_for_each_entry(iocbq, &head, list) {
+			size = 0;
+			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+				bdeBuf1 = iocbq->context2;
+				bdeBuf2 = iocbq->context3;
+			}
+			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+				if (phba->sli3_options &
+				    LPFC_SLI3_HBQ_ENABLED) {
+					if (i == 0) {
+						hbqe = (struct lpfc_hbq_entry *)
+						  &iocbq->iocb.un.ulpWord[0];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf1;
+					} else if (i == 1) {
+						hbqe = (struct lpfc_hbq_entry *)
+							&iocbq->iocb.unsli3.
+							sli3Words[4];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf2;
+					}
+					if ((offset + size) > evt_dat->len)
+						size = evt_dat->len - offset;
+				} else {
+					size = iocbq->iocb.un.cont64[i].
+						tus.f.bdeSize;
+					bde = &iocbq->iocb.un.cont64[i];
+					dma_addr = getPaddr(bde->addrHigh,
+							    bde->addrLow);
+					dmabuf = lpfc_sli_ringpostbuf_get(phba,
+							pring, dma_addr);
+				}
+				if (!dmabuf) {
+					lpfc_printf_log(phba, KERN_ERR,
+						LOG_LIBDFC, "2616 No dmabuf "
+						"found for iocbq 0x%p\n",
+						iocbq);
+					kfree(evt_dat->data);
+					kfree(evt_dat);
+					spin_lock_irqsave(&phba->ct_ev_lock,
+						flags);
+					lpfc_bsg_event_unref(evt);
+					spin_unlock_irqrestore(
+						&phba->ct_ev_lock, flags);
+					goto error_ct_unsol_exit;
+				}
+				memcpy((char *)(evt_dat->data) + offset,
+				       dmabuf->virt, size);
+				offset += size;
+				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
+				    !(phba->sli3_options &
+				      LPFC_SLI3_HBQ_ENABLED)) {
+					lpfc_sli_ringpostbuf_put(phba, pring,
+								 dmabuf);
+				} else {
+					switch (cmd) {
+					case ELX_LOOPBACK_DATA:
+						if (phba->sli_rev <
+						    LPFC_SLI_REV4)
+							diag_cmd_data_free(phba,
+							(struct lpfc_dmabufext
+							 *)dmabuf);
+						break;
+					case ELX_LOOPBACK_XRI_SETUP:
+						if ((phba->sli_rev ==
+							LPFC_SLI_REV2) ||
+							(phba->sli3_options &
+							LPFC_SLI3_HBQ_ENABLED
+							)) {
+							lpfc_in_buf_free(phba,
+									dmabuf);
+						} else {
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						}
+						break;
+					default:
+						if (!(phba->sli3_options &
+						      LPFC_SLI3_HBQ_ENABLED))
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						break;
+					}
+				}
+			}
+		}
+
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			evt_dat->immed_dat = phba->ctx_idx;
+			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+			/* Provide warning for over-run of the ct_ctx array */
+			if (phba->ct_ctx[evt_dat->immed_dat].flags &
+			    UNSOL_VALID)
+				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+						"2717 CT context array entry "
+						"[%d] over-run: oxid:x%x, "
+						"sid:x%x\n", phba->ctx_idx,
+						phba->ct_ctx[
+						    evt_dat->immed_dat].oxid,
+						phba->ct_ctx[
+						    evt_dat->immed_dat].SID);
+			phba->ct_ctx[evt_dat->immed_dat].rxid =
+				piocbq->iocb.ulpContext;
+			phba->ct_ctx[evt_dat->immed_dat].oxid =
+				piocbq->iocb.unsli3.rcvsli3.ox_id;
+			phba->ct_ctx[evt_dat->immed_dat].SID =
+				piocbq->iocb.un.rcvels.remoteID;
+			phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+		} else
+			evt_dat->immed_dat = piocbq->iocb.ulpContext;
+
+		evt_dat->type = FC_REG_CT_EVENT;
+		list_add(&evt_dat->node, &evt->events_to_see);
+		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+			wake_up_interruptible(&evt->wq);
+			lpfc_bsg_event_unref(evt);
+			break;
+		}
+
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		lpfc_bsg_event_unref(evt);
+
+		job = evt->set_job;
+		evt->set_job = NULL;
+		if (job) {
+			job->reply->reply_payload_rcv_len = size;
+			/* make error code available to userspace */
+			job->reply->result = 0;
+			job->dd_data = NULL;
+			/* complete the job back to userspace */
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+			job->job_done(job);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+error_ct_unsol_exit:
+	if (!list_empty(&head))
+		list_del(&head);
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
+ * @job: SET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct set_ct_event *event_req;
+	struct lpfc_bsg_event *evt;
+	int rc = 0;
+	struct bsg_job_data *dd_data = NULL;
+	uint32_t ev_mask;
+	unsigned long flags;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2612 Received SET_CT_EVENT below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (dd_data == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2734 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto job_error;
+	}
+
+	event_req = (struct set_ct_event *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+				FC_REG_EVENT_MASK);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			lpfc_bsg_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (&evt->node == &phba->ct_ev_waiters) {
+		/* no event waiting struct yet - first call */
+		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
+					event_req->ev_req_id);
+		if (!evt) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2617 Failed allocation of event "
+					"waiter\n");
+			rc = -ENOMEM;
+			goto job_error;
+		}
+
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_add(&evt->node, &phba->ct_ev_waiters);
+		lpfc_bsg_event_ref(evt);
+		evt->wait_time_stamp = jiffies;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	evt->waiting = 1;
+	dd_data->type = TYPE_EVT;
+	dd_data->context_un.evt = evt;
+	evt->set_job = job; /* for unsolicited command */
+	job->dd_data = dd_data; /* for fc transport timeout callback*/
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return 0; /* call job done later */
+
+job_error:
+	kfree(dd_data);
+
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
+ * @job: GET_EVENT fc_bsg_job
+ **/
+static int
+lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct get_ct_event *event_req;
+	struct get_ct_event_reply *event_reply;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evt_dat = NULL;
+	unsigned long flags;
+	uint32_t rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2613 Received GET_CT_EVENT request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_req = (struct get_ct_event *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	event_reply = (struct get_ct_event_reply *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			if (list_empty(&evt->events_to_get))
+				break;
+			lpfc_bsg_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			evt_dat = list_entry(evt->events_to_get.prev,
+					     struct event_data, node);
+			list_del(&evt_dat->node);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* The app may continue to ask for event data until it gets
+	 * an error indicating that there isn't any more
+	 */
+	if (evt_dat == NULL) {
+		job->reply->reply_payload_rcv_len = 0;
+		rc = -ENOENT;
+		goto job_error;
+	}
+
+	if (evt_dat->len > job->request_payload.payload_len) {
+		evt_dat->len = job->request_payload.payload_len;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2618 Truncated event data at %d "
+				"bytes\n",
+				job->request_payload.payload_len);
+	}
+
+	event_reply->type = evt_dat->type;
+	event_reply->immed_data = evt_dat->immed_dat;
+	if (evt_dat->len > 0)
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->request_payload.sg_list,
+					    job->request_payload.sg_cnt,
+					    evt_dat->data, evt_dat->len);
+	else
+		job->reply->reply_payload_rcv_len = 0;
+
+	if (evt_dat) {
+		kfree(evt_dat->data);
+		kfree(evt_dat);
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	job->dd_data = NULL;
+	job->reply->result = 0;
+	job->job_done(job);
+	return 0;
+
+job_error:
+	job->dd_data = NULL;
+	job->reply->result = rc;
+	return rc;
+}
+
+/**
+ * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_issue_ct_rsp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other threads which
+ * clean up the SLI layer objects.
+ * This function translates the iocb completion status into an error
+ * code for userspace, unmaps the request payload, releases the dma
+ * buffer, iocb and node reference taken when the response was issued,
+ * and then completes the bsg job back to userspace.
+ **/
+static void
+lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_nodelist *ndlp;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context2;
+	/* normal completion and timeout crossed paths, already done */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	job = dd_data->context_un.iocb.set_job;
+	bmp = dd_data->context_un.iocb.bmp;
+	rsp = &rspiocbq->iocb;
+	ndlp = dd_data->context_un.iocb.ndlp;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_nlp_put(ndlp);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_issue_ct_rsp - issue a ct response
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @tag: tag index value into the port's context exchange array.
+ * @bmp: Pointer to a dma buffer descriptor.
+ * @num_entry: Number of entries in the bde.
+ **/
+static int
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+		  struct lpfc_dmabuf *bmp, int num_entry)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *ctiocb = NULL;
+	int rc = 0;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct bsg_job_data *dd_data;
+	uint32_t creg_val;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2736 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	/* Allocate buffer for  command iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb) {
+		rc = -ENOMEM;
+		goto no_ctiocb;
+	}
+
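+	/* Build an XMIT_SEQUENCE64 whose BDE points at the caller's BPL */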
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	/* Fill in rest of iocb */
+	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Do not issue unsol response if oxid not marked as valid */
+		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		icmd->ulpContext = phba->ct_ctx[tag].rxid;
+		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
+		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+		if (!ndlp) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+				 "2721 ndlp null for oxid %x SID %x\n",
+					icmd->ulpContext,
+					phba->ct_ctx[tag].SID);
+			rc = IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
+		/* Check if the ndlp is active */
+		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+			rc = -IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
+		/* get a reference count so the ndlp doesn't go away while
+		 * we respond
+		 */
+		if (!lpfc_nlp_get(ndlp)) {
+			rc = -IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+
+		icmd->un.ulpWord[3] =
+				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+		/* The exchange is done, mark the entry as invalid */
+		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+	} else
+		icmd->ulpContext = (ushort) tag;
+
+	icmd->ulpTimeout = phba->fc_ratov * 2;
+
+	/* Xmit CT response on exchange <xid> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+
+	ctiocb->iocb_cmpl = NULL;
+	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+	ctiocb->vport = phba->pport;
+	ctiocb->context3 = bmp;
+
+	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+	ctiocb->context2 = dd_data;
+	ctiocb->context1 = ndlp;
+	dd_data->type = TYPE_IOCB;
+	dd_data->context_un.iocb.cmdiocbq = ctiocb;
+	dd_data->context_un.iocb.rspiocbq = NULL;
+	dd_data->context_un.iocb.set_job = job;
+	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+
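+	/* re-enable the FCP ring interrupt if it is disabled for polling */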
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
+			rc = -IOCB_ERROR;
+			goto issue_ct_rsp_exit;
+		}
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+issue_ct_rsp_exit:
+	lpfc_sli_release_iocbq(phba, ctiocb);
+no_ctiocb:
+	kfree(dd_data);
+no_dd_data:
+	return rc;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
+ * @job: SEND_MGMT_RESP fc_bsg_job
+ **/
+static int
+lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	struct ulp_bde64 *bpl;
+	struct lpfc_dmabuf *bmp = NULL;
+	struct scatterlist *sgel = NULL;
+	int request_nseg;
+	int numbde;
+	dma_addr_t busaddr;
+	uint32_t tag = mgmt_resp->tag;
+	unsigned long reqbfrcnt =
+			(unsigned long)job->request_payload.payload_len;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
+		rc = -ERANGE;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_exit;
+	}
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
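+	/* DMA-map the request payload and build one BDE per mapped segment */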
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* TBD need to handle a timeout */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	rc = -EACCES;
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+send_mgmt_rsp_free_bmp:
+	kfree(bmp);
+send_mgmt_rsp_exit:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_diag_mode_enter - prepare for entering device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for preparing the driver for diag loopback
+ * on device.
+ */
+static int
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	int i = 0;
+
+	psli = &phba->sli;
+	if (!psli)
+		return -ENODEV;
+
+	pring = &psli->ring[LPFC_FCP_RING];
+	if (!pring)
+		return -ENODEV;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+		return -EACCES;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_block_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_block_requests(shost);
+	}
+
+	while (pring->txcmplq_cnt) {
+		if (i++ > 500)  /* wait up to 5 seconds */
+			break;
+		msleep(10);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for the driver's exit processing from
+ * diag loopback mode on the device.
+ */
+static void
+lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports) {
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			scsi_unblock_requests(shost);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	} else {
+		shost = lpfc_shost_from_vport(phba->pport);
+		scsi_unblock_requests(shost);
+	}
+	return;
+}
+
+/**
+ * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli3 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ * All new scsi requests are blocked and a small delay is used to allow the
+ * outstanding scsi requests to complete, then the link is brought down. Once
+ * the link is placed in loopback mode, scsi requests are again allowed
+ * so the scsi mid-layer doesn't give up on the port.
+ * All of this is done in-line.
+ */
+static int
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+	struct diag_mode_set *loopback_mode;
+	uint32_t link_flags;
+	uint32_t timeout;
+	LPFC_MBOXQ_t *pmboxq  = NULL;
+	int mbxstatus = MBX_SUCCESS;
+	int i = 0;
+	int rc = 0;
+
+	/* no data to return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2738 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_mode_set)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
+	loopback_mode = (struct diag_mode_set *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto loopback_mode_exit;
+	}
+	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+	pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
+		/* wait for link down before proceeding */
+		i = 0;
+		while (phba->link_state != LPFC_LINK_DOWN) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				goto loopback_mode_exit;
+			}
+			msleep(10);
+		}
+
+		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		if (link_flags == INTERNAL_LOOP_BACK)
+			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+		else
+			pmboxq->u.mb.un.varInitLnk.link_flags =
+				FLAGS_TOPOLOGY_MODE_LOOP;
+
+		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
+		pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+						     LPFC_MBOX_TMO);
+
+		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+			rc = -ENODEV;
+		else {
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag |= LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
+			/* wait for the link attention interrupt */
+			msleep(100);
+
+			i = 0;
+			while (phba->link_state != LPFC_HBA_READY) {
+				if (i++ > timeout) {
+					rc = -ETIMEDOUT;
+					break;
+				}
+
+				msleep(10);
+			}
+		}
+
+	} else
+		rc = -ENODEV;
+
+loopback_mode_exit:
+	lpfc_bsg_diag_mode_exit(phba);
+
+	/*
+	 * Let SLI layer release mboxq if mbox command completed after timeout.
+	 */
+	if (pmboxq && mbxstatus != MBX_TIMEOUT)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
+ * @phba: Pointer to HBA context object.
+ * @diag: Flag to set the link to diag or normal operation state.
+ *
+ * This function is responsible for issuing an sli4 mailbox command for setting
+ * link to either diag state or normal operation state.
+ */
+static int
+lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_mbx_set_link_diag_state *link_diag_state;
+	uint32_t req_len, alloc_len;
+	int mbxstatus = MBX_SUCCESS, rc;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto link_diag_state_set_out;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+			diag, phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
+	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
+	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
+	       phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
+	       phba->sli4_hba.lnk_info.lnk_tp);
+	if (diag)
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 1);
+	else
+		bf_set(lpfc_mbx_set_diag_state_diag,
+		       &link_diag_state->u.req, 0);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
+		rc = 0;
+	else
+		rc = -ENODEV;
+
+link_diag_state_set_out:
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing an sli4 mailbox command for setting
+ * up internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmboxq;
+	uint32_t req_len, alloc_len;
+	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+	int mbxstatus = MBX_SUCCESS, rc = 0;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq)
+		return -ENOMEM;
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+				req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+	bf_set(lpfc_mbx_set_diag_state_link_num,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_set_diag_state_link_type,
+	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
+
+	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3127 Failed setup loopback mode mailbox "
+				"command, rc:x%x, status:x%x\n", mbxstatus,
+				pmboxq->u.mb.mbxStatus);
+		rc = -ENODEV;
+	}
+	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up SLI4 FC port registrations for a diagnostic run, which
+ * includes all the rpis, vfi, and also vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+	int rc;
+
+	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3136 Port still had vfi registered: "
+				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+				phba->pport->fc_myDID, phba->fcf.fcfi,
+				phba->sli4_hba.vfi_ids[phba->pport->vfi],
+				phba->vpi_ids[phba->pport->vpi]);
+		return -EINVAL;
+	}
+	rc = lpfc_issue_reg_vfi(phba->pport);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
+ * @phba: Pointer to HBA context object.
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing an sli4 port into diagnostic
+ * loopback mode in order to perform a diagnostic loopback test.
+ */
+static int
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+	struct diag_mode_set *loopback_mode;
+	uint32_t link_flags, timeout;
+	int i, rc = 0;
+
+	/* no data to return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct diag_mode_set)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3011 Received DIAG MODE request size:%d "
+				"below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_mode_set)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	/* indicate we are in loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag |= LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* reset port to start from scratch */
+	rc = lpfc_selective_reset(phba);
+	if (rc)
+		goto job_error;
+
+	/* bring the link to diagnostic mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3129 Bring link to diagnostic state.\n");
+	loopback_mode = (struct diag_mode_set *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	link_flags = loopback_mode->type;
+	timeout = loopback_mode->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3130 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_exit;
+	}
+
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3131 Timeout waiting for link to "
+					"diagnostic mode, timeout:%d ms\n",
+					timeout * 10);
+			goto loopback_mode_exit;
+		}
+		msleep(10);
+	}
+
+	/* set up loopback mode */
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"3132 Set up loopback mode:x%x\n", link_flags);
+
+	if (link_flags == INTERNAL_LOOP_BACK)
+		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+	else if (link_flags == EXTERNAL_LOOP_BACK)
+		rc = lpfc_hba_init_link_fc_topology(phba,
+						    FLAGS_TOPOLOGY_MODE_PT_PT,
+						    MBX_NOWAIT);
+	else {
+		rc = -EINVAL;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3141 Loopback mode:x%x not supported\n",
+				link_flags);
+		goto loopback_mode_exit;
+	}
+
+	if (!rc) {
+		/* wait for the link attention interrupt */
+		msleep(100);
+		i = 0;
+		while (phba->link_state < LPFC_LINK_UP) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3137 Timeout waiting for link up "
+					"in loopback mode, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+	/* port resource registration setup for loopback diagnostic */
+	if (!rc) {
+		/* set up a non-zero myDID for loopback test */
+		phba->pport->fc_myDID = 1;
+		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+	} else
+		goto loopback_mode_exit;
+
+	if (!rc) {
+		/* wait for the port ready */
+		msleep(100);
+		i = 0;
+		while (phba->link_state != LPFC_HBA_READY) {
+			if (i++ > timeout) {
+				rc = -ETIMEDOUT;
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3133 Timeout waiting for port "
+					"loopback mode ready, timeout:%d ms\n",
+					timeout * 10);
+				break;
+			}
+			msleep(10);
+		}
+	}
+
+loopback_mode_exit:
+	/* clear loopback diagnostic mode */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_LOOPBACK_MODE;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	int rc;
+
+	shost = job->shost;
+	if (!shost)
+		return -ENODEV;
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
+	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+		 LPFC_SLI_INTF_IF_TYPE_2)
+		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
+	else
+		rc = -ENODEV;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
+ *
+ * This function is responsible for checking and dispatching the bsg diag
+ * command from the user to the proper driver action routine.
+ */
+static int
+lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	struct diag_mode_set *loopback_mode_end_cmd;
+	uint32_t timeout;
+	int rc, i;
+
+	shost = job->shost;
+	if (!shost)
+		return -ENODEV;
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport)
+		return -ENODEV;
+	phba = vport->phba;
+	if (!phba)
+		return -ENODEV;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return -ENODEV;
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		return -ENODEV;
+
+	/* clear loopback diagnostic mode */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_flag &= ~LS_LOOPBACK_MODE;
+	spin_unlock_irq(&phba->hbalock);
+	loopback_mode_end_cmd = (struct diag_mode_set *)
+			job->request->rqst_data.h_vendor.vendor_cmd;
+	timeout = loopback_mode_end_cmd->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3139 Failed to bring link to diagnostic "
+				"state, rc:x%x\n", rc);
+		goto loopback_mode_end_exit;
+	}
+
+	/* wait for link down before proceeding */
+	i = 0;
+	while (phba->link_state != LPFC_LINK_DOWN) {
+		if (i++ > timeout) {
+			rc = -ETIMEDOUT;
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3140 Timeout waiting for link to "
+					"diagnostic mode_end, timeout:%d ms\n",
+					timeout * 10);
+			/* there is nothing much we can do here */
+			break;
+		}
+		msleep(10);
+	}
+
+	/* reset port resource registrations */
+	rc = lpfc_selective_reset(phba);
+	phba->pport->fc_myDID = 0;
+
+loopback_mode_end_exit:
+	/* make return code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
+ * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
+ *
+ * This function performs an SLI4 diag link test request from the user
+ * application.
+ */
+static int
+lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmboxq;
+	struct sli4_link_diag *link_diag_test_cmd;
+	uint32_t req_len, alloc_len;
+	uint32_t timeout;
+	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct diag_status *diag_status_reply;
+	int mbxstatus, rc = 0;
+
+	shost = job->shost;
+	if (!shost) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	vport = (struct lpfc_vport *)job->shost->hostdata;
+	if (!vport) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	phba = vport->phba;
+	if (!phba) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = -ENODEV;
+		goto job_error;
+	}
+
+	if (job->request_len < sizeof(struct fc_bsg_request) +
+	    sizeof(struct sli4_link_diag)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3013 Received LINK DIAG TEST request "
+				" size:%d below the minimum size:%d\n",
+				job->request_len,
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct sli4_link_diag)));
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	rc = lpfc_bsg_diag_mode_enter(phba);
+	if (rc)
+		goto job_error;
+
+	link_diag_test_cmd = (struct sli4_link_diag *)
+			 job->request->rqst_data.h_vendor.vendor_cmd;
+	timeout = link_diag_test_cmd->timeout * 100;
+
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
+
+	if (rc)
+		goto job_error;
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto link_diag_test_exit;
+	}
+
+	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
+		   sizeof(struct lpfc_sli4_cfg_mhdr));
+	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
+				     req_len, LPFC_SLI4_MBX_EMBED);
+	if (alloc_len != req_len) {
+		rc = -ENOMEM;
+		goto link_diag_test_exit;
+	}
+	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
+	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
+	       phba->sli4_hba.lnk_info.lnk_no);
+	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
+	       phba->sli4_hba.lnk_info.lnk_tp);
+	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_id);
+	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->loops);
+	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->test_version);
+	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
+	       link_diag_test_cmd->error_action);
+
+	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || mbxstatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3010 Run link diag test mailbox failed with "
+				"mbx_status x%x status x%x, add_status x%x\n",
+				mbxstatus, shdr_status, shdr_add_status);
+	}
+
+	diag_status_reply = (struct diag_status *)
+			    job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"3012 Received Run link diag test reply "
+				"below minimum size (%d): reply_len:%d\n",
+				(int)(sizeof(struct fc_bsg_request) +
+				sizeof(struct diag_status)),
+				job->reply_len);
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	diag_status_reply->mbox_status = mbxstatus;
+	diag_status_reply->shdr_status = shdr_status;
+	diag_status_reply->shdr_add_status = shdr_add_status;
+
+link_diag_test_exit:
+	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	lpfc_bsg_diag_mode_exit(phba);
+
+job_error:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace if no error */
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfcdiag_loop_self_reg - obtains a remote port login id
+ * @phba: Pointer to HBA context object
+ * @rpi: Pointer to a remote port login id
+ *
+ * This function obtains a remote port login id so the diag loopback test
+ * can send and receive its own unsolicited CT command.
+ **/
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *dmabuff;
+	int status;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
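+	/* register a login to our own DID; SLI4 must allocate an rpi first */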
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	else {
+		*rpi = lpfc_sli4_alloc_rpi(phba);
+		status = lpfc_reg_rpi(phba, phba->pport->vpi,
+				phba->pport->fc_myDID,
+				(uint8_t *)&phba->pport->fc_sparam,
+				mbox, *rpi);
+	}
+
+	if (status) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
+		return -ENOMEM;
+	}
+
+	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
+	mbox->context1 = NULL;
+	mbox->context2 = NULL;
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+		kfree(dmabuff);
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
+		return -ENODEV;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		*rpi = mbox->u.mb.un.varWords[0];
+
+	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+	kfree(dmabuff);
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_self_unreg - unregs from the rpi
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ *
+ * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
+ **/
+static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
+{
+	LPFC_MBOXQ_t *mbox;
+	int status;
+
+	/* Allocate mboxq structure */
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox == NULL)
+		return -ENOMEM;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_unreg_login(phba, 0, rpi, mbox);
+	else
+		lpfc_unreg_login(phba, phba->pport->vpi,
+				 phba->sli4_hba.rpi_ids[rpi], mbox);
+
+	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+		if (status != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	mempool_free(mbox, phba->mbox_mem_pool);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, rpi);
+	return 0;
+}
+
+/**
+ * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ * @txxri: Pointer to transmit exchange id
+ * @rxxri: Pointer to response exchange id
+ *
+ * This function obtains the transmit and receive ids required to send
+ * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
+ * flags are used so the unsolicited response handler is able to process
+ * the ct command sent on the same port.
+ **/
+static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
+			 uint16_t *txxri, uint16_t *rxxri)
+{
+	struct lpfc_bsg_event *evt;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	struct lpfc_dmabuf *dmabuf;
+	struct ulp_bde64 *bpl = NULL;
+	struct lpfc_sli_ct_request *ctreq = NULL;
+	int ret_val = 0;
+	int time_left;
+	int iocb_stat = 0;
+	unsigned long flags;
+
+	*txxri = 0;
+	*rxxri = 0;
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (dmabuf) {
+		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+		if (dmabuf->virt) {
+			INIT_LIST_HEAD(&dmabuf->list);
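+			/* one mbuf holds the BPL followed by the CT request */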
+			bpl = (struct ulp_bde64 *) dmabuf->virt;
+			memset(bpl, 0, sizeof(*bpl));
+			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+			bpl->addrHigh =
+				le32_to_cpu(putPaddrHigh(dmabuf->phys +
+					sizeof(*bpl)));
+			bpl->addrLow =
+				le32_to_cpu(putPaddrLow(dmabuf->phys +
+					sizeof(*bpl)));
+			bpl->tus.f.bdeFlags = 0;
+			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		}
+	}
+
+	if (cmdiocbq == NULL || rspiocbq == NULL ||
+	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
+		dmabuf->virt == NULL) {
+		ret_val = -ENOMEM;
+		goto err_get_xri_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	rsp = &rspiocbq->iocb;
+
+	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+
+	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	ctreq->RevisionId.bits.InId = 0;
+	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+	ctreq->FsSubType = 0;
+	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
+	ctreq->CommandResponse.bits.Size = 0;
+
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
+
+	cmd->un.xseq64.w5.hcsw.Fctl = LA;
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = rpi;
+
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+
+	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+				rspiocbq,
+				(phba->fc_ratov * 2)
+				+ LPFC_DRVR_TIMEOUT);
+	if (iocb_stat) {
+		ret_val = -EIO;
+		goto err_get_xri_exit;
+	}
+	*txxri =  rsp->ulpContext;
+
+	evt->waiting = 1;
+	evt->wait_time_stamp = jiffies;
+	time_left = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+	if (list_empty(&evt->events_to_see))
+		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
+	else {
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		*rxxri = (list_entry(evt->events_to_get.prev,
+				     typeof(struct event_data),
+				     node))->immed_dat;
+	}
+	evt->waiting = 0;
+
+err_get_xri_exit:
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (dmabuf) {
+		if (dmabuf->virt)
+			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+
+	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	if (rspiocbq)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+	return ret_val;
+}
+
+/**
+ * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object
+ *
+ * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
+ * returns the pointer to the buffer.
+ **/
+static struct lpfc_dmabuf *
+lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf;
+	struct pci_dev *pcidev = phba->pcidev;
+
+	/* allocate dma buffer struct */
+	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	INIT_LIST_HEAD(&dmabuf->list);
+
+	/* now, allocate dma buffer */
+	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					  &(dmabuf->phys), GFP_KERNEL);
+
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return NULL;
+	}
+	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
+
+	return dmabuf;
+}
+
+/**
+ * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
+ *
+ * This routine frees a dma buffer and its associated buffer
+ * descriptor referred to by @dmabuf.
+ **/
+static void
+lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
+{
+	struct pci_dev *pcidev = phba->pcidev;
+
+	if (!dmabuf)
+		return;
+
+	if (dmabuf->virt)
+		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return;
+}
+
+/**
+ * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
+ * @phba: Pointer to HBA context object.
+ * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
+ *
+ * This routine frees all dma buffers and their associated buffer
+ * descriptors referred to by @dmabuf_list.
+ **/
+static void
+lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
+			    struct list_head *dmabuf_list)
+{
+	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+
+	if (list_empty(dmabuf_list))
+		return;
+
+	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
+		list_del_init(&dmabuf->list);
+		lpfc_bsg_dma_page_free(phba, dmabuf);
+	}
+	return;
+}
+
+/**
+ * diag_cmd_data_alloc - fills in a bde struct with dma buffers
+ * @phba: Pointer to HBA context object
+ * @bpl: Pointer to 64 bit bde structure
+ * @size: Number of bytes to process
+ * @nocopydata: Flag indicating user data is not to be copied into the buffers
+ *
+ * This function allocates page size buffers and populates an lpfc_dmabufext.
+ * Unless @nocopydata is set, the buffers are zeroed so the caller can copy
+ * user data into the kernel memory. The chained list of page size buffers is
+ * returned.
+ **/
+static struct lpfc_dmabufext *
+diag_cmd_data_alloc(struct lpfc_hba *phba,
+		   struct ulp_bde64 *bpl, uint32_t size,
+		   int nocopydata)
+{
+	struct lpfc_dmabufext *mlist = NULL;
+	struct lpfc_dmabufext *dmp;
+	int cnt, offset = 0, i = 0;
+	struct pci_dev *pcidev;
+
+	pcidev = phba->pcidev;
+
+	while (size) {
+		/* We get chunks of 4K */
+		if (size > BUF_SZ_4K)
+			cnt = BUF_SZ_4K;
+		else
+			cnt = size;
+
+		/* allocate struct lpfc_dmabufext buffer header */
+		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
+		if (!dmp)
+			goto out;
+
+		INIT_LIST_HEAD(&dmp->dma.list);
+
+		/* Queue it to a linked list */
+		if (mlist)
+			list_add_tail(&dmp->dma.list, &mlist->dma.list);
+		else
+			mlist = dmp;
+
+		/* allocate buffer */
+		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
+						   cnt,
+						   &(dmp->dma.phys),
+						   GFP_KERNEL);
+
+		if (!dmp->dma.virt)
+			goto out;
+
+		dmp->size = cnt;
+
+		if (nocopydata) {
+			bpl->tus.f.bdeFlags = 0;
+			pci_dma_sync_single_for_device(phba->pcidev,
+				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+		} else {
+			memset((uint8_t *)dmp->dma.virt, 0, cnt);
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		}
+
+		/* build buffer ptr list for IOCB */
+		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+		bpl->tus.f.bdeSize = (ushort) cnt;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+
+		i++;
+		offset += cnt;
+		size -= cnt;
+	}
+
+	mlist->flag = i;
+	return mlist;
+out:
+	diag_cmd_data_free(phba, mlist);
+	return NULL;
+}
+
+/**
+ * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * @phba: Pointer to HBA context object
+ * @rxxri: Receive exchange id
+ * @len: Number of data bytes
+ *
+ * This function allocates and posts data buffers of sufficient size to receive
+ * an unsolicited CT command.
+ **/
+static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+			     size_t len)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd = NULL;
+	struct list_head head, *curr, *next;
+	struct lpfc_dmabuf *rxbmp;
+	struct lpfc_dmabuf *dmp;
+	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
+	struct ulp_bde64 *rxbpl = NULL;
+	uint32_t num_bde;
+	struct lpfc_dmabufext *rxbuffer = NULL;
+	int ret_val = 0;
+	int iocb_stat;
+	int i = 0;
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (rxbmp != NULL) {
+		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+		if (rxbmp->virt) {
+			INIT_LIST_HEAD(&rxbmp->list);
+			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+		}
+	}
+
+	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+		ret_val = -ENOMEM;
+		goto err_post_rxbufs_exit;
+	}
+
+	/* Queue buffers for the receive exchange */
+	num_bde = (uint32_t)rxbuffer->flag;
+	dmp = &rxbuffer->dma;
+
+	cmd = &cmdiocbq->iocb;
+	i = 0;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &dmp->list);
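+	/* walk the chain and post each buffer on the receive exchange */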
+	list_for_each_safe(curr, next, &head) {
+		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
+		list_del(curr);
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+			cmd->un.quexri64cx.buff.bde.addrHigh =
+				putPaddrHigh(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.addrLow =
+				putPaddrLow(mp[i]->phys);
+			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
+			cmd->ulpCommand = CMD_QUE_XRI64_CX;
+			cmd->ulpPU = 0;
+			cmd->ulpLe = 1;
+			cmd->ulpBdeCount = 1;
+			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
+
+		} else {
+			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
+			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
+			cmd->un.cont64[i].tus.f.bdeSize =
+				((struct lpfc_dmabufext *)mp[i])->size;
+			cmd->ulpBdeCount = ++i;
+
+			if ((--num_bde > 0) && (i < 2))
+				continue;
+
+			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
+			cmd->ulpLe = 1;
+		}
+
+		cmd->ulpClass = CLASS3;
+		cmd->ulpContext = rxxri;
+
+		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+						0);
+		if (iocb_stat == IOCB_ERROR) {
+			diag_cmd_data_free(phba,
+				(struct lpfc_dmabufext *)mp[0]);
+			if (mp[1])
+				diag_cmd_data_free(phba,
+					  (struct lpfc_dmabufext *)mp[1]);
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = -EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
+		if (mp[1]) {
+			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
+			mp[1] = NULL;
+		}
+
+		/* The iocb was freed by lpfc_sli_issue_iocb */
+		cmdiocbq = lpfc_sli_get_iocbq(phba);
+		if (!cmdiocbq) {
+			dmp = list_entry(next, struct lpfc_dmabuf, list);
+			ret_val = -EIO;
+			goto err_post_rxbufs_exit;
+		}
+
+		cmd = &cmdiocbq->iocb;
+		i = 0;
+	}
+	list_del(&head);
+
+err_post_rxbufs_exit:
+
+	if (rxbmp) {
+		if (rxbmp->virt)
+			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+		kfree(rxbmp);
+	}
+
+	if (cmdiocbq)
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+	return ret_val;
+}
+
+/**
+ * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
+ * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
+ *
+ * This function receives a user data buffer to be transmitted and received on
+ * the same port. The link must be up and in loopback mode prior
+ * to this function being called.
+ * 1. A kernel buffer is allocated to copy the user data into.
+ * 2. The port registers with "itself".
+ * 3. The transmit and receive exchange ids are obtained.
+ * 4. The receive exchange id is posted.
+ * 5. A new els loopback event is created.
+ * 6. The command and response iocbs are allocated.
+ * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
+ *
+ * This function is meant to be called n times while the port is in loopback
+ * so it is the app's responsibility to issue a reset to take the port out
+ * of loopback mode.
+ **/
+static int
+lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct diag_mode_test *diag_mode;
+	struct lpfc_bsg_event *evt;
+	struct event_data *evdat;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t size;
+	uint32_t full_size;
+	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
+	uint16_t rpi = 0;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+	IOCB_t *cmd, *rsp = NULL;
+	struct lpfc_sli_ct_request *ctreq;
+	struct lpfc_dmabuf *txbmp;
+	struct ulp_bde64 *txbpl = NULL;
+	struct lpfc_dmabufext *txbuffer = NULL;
+	struct list_head head;
+	struct lpfc_dmabuf  *curr;
+	uint16_t txxri = 0, rxxri;
+	uint32_t num_bde;
+	uint8_t *ptr = NULL, *rx_databuf = NULL;
+	int rc = 0;
+	int time_left;
+	int iocb_stat;
+	unsigned long flags;
+	void *dataout = NULL;
+	uint32_t total_mem;
+
+	/* in case no data is returned, return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2739 Received DIAG TEST request below minimum "
+				"size\n");
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+
+	if (job->request_payload.payload_len !=
+		job->reply_payload.payload_len) {
+		rc = -EINVAL;
+		goto loopback_test_exit;
+	}
+	diag_mode = (struct diag_mode_test *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		rc = -EACCES;
+		goto loopback_test_exit;
+	}
+
+	size = job->request_payload.payload_len;
+	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
+
+	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
+		rc = -ERANGE;
+		goto loopback_test_exit;
+	}
+
+	if (full_size >= BUF_SZ_4K) {
+		/*
+		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
+		 * then we allocate 64k and re-use that buffer over and over to
+		 * xfer the whole block. This is because the Linux kernel has a
+		 * problem allocating more than 120k of kernel space memory. Saw
+		 * problem with GET_FCPTARGETMAPPING...
+		 */
+		if (size <= (64 * 1024))
+			total_mem = full_size;
+		else
+			total_mem = 64 * 1024;
+	} else
+		/* Allocate memory for ioctl data */
+		total_mem = BUF_SZ_4K;
+
+	dataout = kmalloc(total_mem, GFP_KERNEL);
+	if (dataout == NULL) {
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	ptr = dataout;
+	ptr += ELX_LOOPBACK_HEADER_SZ;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+				job->request_payload.sg_cnt,
+				ptr, size);
+	rc = lpfcdiag_loop_self_reg(phba, &rpi);
+	if (rc)
+		goto loopback_test_exit;
+
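+	/* only SLI3 requires xri setup and posted receive buffers */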
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
+
+		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+		if (rc) {
+			lpfcdiag_loop_self_unreg(phba, rpi);
+			goto loopback_test_exit;
+		}
+	}
+	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+				SLI_CT_ELX_LOOPBACK);
+	if (!evt) {
+		lpfcdiag_loop_self_unreg(phba, rpi);
+		rc = -ENOMEM;
+		goto loopback_test_exit;
+	}
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	list_add(&evt->node, &phba->ct_ev_waiters);
+	lpfc_bsg_event_ref(evt);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rspiocbq = lpfc_sli_get_iocbq(phba);
+	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+	if (txbmp) {
+		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
+		if (txbmp->virt) {
+			INIT_LIST_HEAD(&txbmp->list);
+			txbpl = (struct ulp_bde64 *) txbmp->virt;
+			txbuffer = diag_cmd_data_alloc(phba,
+							txbpl, full_size, 0);
+		}
+	}
+
+	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
+		rc = -ENOMEM;
+		goto err_loopback_test_exit;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		rsp = &rspiocbq->iocb;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &txbuffer->dma.list);
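+	/* copy the loopback header plus user data across the DMA segments */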
+	list_for_each_entry(curr, &head, list) {
+		segment_len = ((struct lpfc_dmabufext *)curr)->size;
+		if (current_offset == 0) {
+			ctreq = curr->virt;
+			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+			ctreq->RevisionId.bits.InId = 0;
+			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+			ctreq->FsSubType = 0;
+			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+			ctreq->CommandResponse.bits.Size   = size;
+			segment_offset = ELX_LOOPBACK_HEADER_SZ;
+		} else
+			segment_offset = 0;
+
+		BUG_ON(segment_offset >= segment_len);
+		memcpy(curr->virt + segment_offset,
+			ptr + current_offset,
+			segment_len - segment_offset);
+
+		current_offset += segment_len - segment_offset;
+		BUG_ON(current_offset > size);
+	}
+	list_del(&head);
+
+	/* Build the XMIT_SEQUENCE iocb */
+	num_bde = (uint32_t)txbuffer->flag;
+
+	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
+	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
+	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+
+	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+
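+	/* SLI3 sends on the xri obtained above; SLI4 sends on the rpi */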
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		cmd->ulpContext = txxri;
+	} else {
+		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+		cmdiocbq->context3 = txbmp;
+		cmdiocbq->sli4_xritag = NO_XRI;
+		cmd->unsli3.rcvsli3.ox_id = 0xffff;
+	}
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+					     rspiocbq, (phba->fc_ratov * 2) +
+					     LPFC_DRVR_TIMEOUT);
+
+	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+					   (rsp->ulpStatus != IOCB_SUCCESS))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3126 Failed loopback test issue iocb: "
+				"iocb_stat:x%x\n", iocb_stat);
+		rc = -EIO;
+		goto err_loopback_test_exit;
+	}
+
+	evt->waiting = 1;
+	time_left = wait_event_interruptible_timeout(
+		evt->wq, !list_empty(&evt->events_to_see),
+		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+	evt->waiting = 0;
+	if (list_empty(&evt->events_to_see)) {
+		rc = (time_left) ? -EINTR : -ETIMEDOUT;
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"3125 Not receiving unsolicited event, "
+				"rc:x%x\n", rc);
+	} else {
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
+		list_move(evt->events_to_see.prev, &evt->events_to_get);
+		evdat = list_entry(evt->events_to_get.prev,
+				   typeof(*evdat), node);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		rx_databuf = evdat->data;
+		if (evdat->len != full_size) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"1603 Loopback test did not receive expected "
+				"data length. actual length 0x%x expected "
+				"length 0x%x\n",
+				evdat->len, full_size);
+			rc = -EIO;
+		} else if (rx_databuf == NULL)
+			rc = -EIO;
+		else {
+			rc = IOCB_SUCCESS;
+			/* skip over elx loopback header */
+			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
+			job->reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    rx_databuf, size);
+			job->reply->reply_payload_rcv_len = size;
+		}
+	}
+
+err_loopback_test_exit:
+	lpfcdiag_loop_self_unreg(phba, rpi);
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	lpfc_bsg_event_unref(evt); /* release ref */
+	lpfc_bsg_event_unref(evt); /* delete */
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	if (cmdiocbq != NULL)
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+	if (rspiocbq != NULL)
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+
+	if (txbmp != NULL) {
+		if (txbpl != NULL) {
+			if (txbuffer != NULL)
+				diag_cmd_data_free(phba, txbuffer);
+			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
+		}
+		kfree(txbmp);
+	}
+
+loopback_test_exit:
+	kfree(dataout);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	/* complete the job back to userspace if no error */
+	if (rc == IOCB_SUCCESS)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
+ * @job: GET_DFC_REV fc_bsg_job
+ **/
+static int
+lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct get_mgmt_rev *event_req;
+	struct get_mgmt_rev_reply *event_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2740 Received GET_DFC_REV request below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_req = (struct get_mgmt_rev *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	event_reply = (struct get_mgmt_rev_reply *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2741 Received GET_DFC_REV reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
+	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
+job_error:
+	job->reply->result = rc;
+	if (rc == 0)
+		job->job_done(job);
+	return rc;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox commands issued from the
+ * lpfc_bsg_issue_mbox function. It is called by the mailbox event handler
+ * with no lock held. It copies the mailbox response back to the BSG job
+ * referenced through context1 and completes the job.
+ **/
+void
+lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	uint32_t size;
+	unsigned long flags;
+	uint8_t *pmb, *pmb_buf;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = pmboxq->context1;
+	/* job already timed out? */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	/*
+	 * The outgoing data is already in the DMA buffer; we only need to
+	 * copy the mailbox header out of the mailboxq structure.
+	 */
+	pmb = (uint8_t *)&pmboxq->u.mb;
+	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+
+	job = dd_data->context_un.mbox.set_job;
+	if (job) {
+		size = job->reply_payload.payload_len;
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmb_buf, size);
+		/* need to hold the lock until we set job->dd_data to NULL
+		 * to hold off the timeout handler returning to the mid-layer
+		 * while we are still processing the job.
+		 */
+		job->dd_data = NULL;
+		dd_data->context_un.mbox.set_job = NULL;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	} else {
+		dd_data->context_un.mbox.set_job = NULL;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	}
+
+	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
+	kfree(dd_data);
+
+	if (job) {
+		job->reply->result = 0;
+		job->job_done(job);
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_check_cmd_access - test for a supported mailbox command
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Some commands require the port to be offline, some may not be called from
+ * the application.
+ **/
+static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
+	MAILBOX_t *mb, struct lpfc_vport *vport)
+{
+	/* return negative error values for bsg job */
+	switch (mb->mbxCommand) {
+	/* Offline only */
+	case MBX_INIT_LINK:
+	case MBX_DOWN_LINK:
+	case MBX_CONFIG_LINK:
+	case MBX_CONFIG_RING:
+	case MBX_RESET_RING:
+	case MBX_UNREG_LOGIN:
+	case MBX_CLEAR_LA:
+	case MBX_DUMP_CONTEXT:
+	case MBX_RUN_DIAGS:
+	case MBX_RESTART:
+	case MBX_SET_MASK:
+		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2743 Command 0x%x is illegal in on-line "
+				"state\n",
+				mb->mbxCommand);
+			return -EPERM;
+		}
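+		/* fall through - when offline these commands join the allowed list below */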
+	case MBX_WRITE_NV:
+	case MBX_WRITE_VPARMS:
+	case MBX_LOAD_SM:
+	case MBX_READ_NV:
+	case MBX_READ_CONFIG:
+	case MBX_READ_RCONFIG:
+	case MBX_READ_STATUS:
+	case MBX_READ_XRI:
+	case MBX_READ_REV:
+	case MBX_READ_LNK_STAT:
+	case MBX_DUMP_MEMORY:
+	case MBX_DOWN_LOAD:
+	case MBX_UPDATE_CFG:
+	case MBX_KILL_BOARD:
+	case MBX_LOAD_AREA:
+	case MBX_LOAD_EXP_ROM:
+	case MBX_BEACON:
+	case MBX_DEL_LD_ENTRY:
+	case MBX_SET_DEBUG:
+	case MBX_WRITE_WWN:
+	case MBX_SLI4_CONFIG:
+	case MBX_READ_EVENT_LOG:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_WRITE_EVENT_LOG:
+	case MBX_PORT_CAPABILITIES:
+	case MBX_PORT_IOV_CONTROL:
+	case MBX_RUN_BIU_DIAG64:
+		break;
+	case MBX_SET_VARIABLE:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1226 mbox: set_variable 0x%x, 0x%x\n",
+			mb->un.varWords[0],
+			mb->un.varWords[1]);
+		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+			&& (mb->un.varWords[1] == 1)) {
+			phba->wait_4_mlo_maint_flg = 1;
+		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			spin_unlock_irq(&phba->hbalock);
+			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
+		}
+		break;
+	case MBX_READ_SPARM64:
+	case MBX_READ_TOPOLOGY:
+	case MBX_REG_LOGIN:
+	case MBX_REG_LOGIN64:
+	case MBX_CONFIG_PORT:
+	case MBX_RUN_BIU_DIAG:
+	default:
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+			"2742 Unknown Command 0x%x\n",
+			mb->mbxCommand);
+		return -EPERM;
+	}
+
+	return 0; /* ok */
+}
+
+/**
+ * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine cleans up and resets the BSG handling of a multi-buffer mbox
+ * command session.
+ **/
+static void
+lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
+{
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
+		return;
+
+	/* free all memory, including dma buffers */
+	lpfc_bsg_dma_page_list_free(phba,
+				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
+	/* multi-buffer write mailbox command pass-through complete */
+	memset((char *)&phba->mbox_ext_buf_ctx, 0,
+	       sizeof(struct lpfc_mbox_ext_buf_ctx));
+	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	return;
+}
+
+/**
+ * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This routine handles the BSG job for mailbox command completions with
+ * multiple external buffers.
+ **/
+static struct fc_bsg_job *
+lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	uint8_t *pmb, *pmb_buf;
+	unsigned long flags;
+	uint32_t size;
+	int rc = 0;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint8_t *pmbx;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = pmboxq->context1;
+	/* has the job already timed out? */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job = NULL;
+		goto job_done_out;
+	}
+
+	/*
+	 * The outgoing data is already in the DMA buffer; we only need to
+	 * copy the mailbox header out of the mailboxq structure.
+	 */
+	pmb = (uint8_t *)&pmboxq->u.mb;
+	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+	/* Copy the byte swapped response mailbox back to the user */
+	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+	/* if there is any non-embedded extended data copy that too */
+	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		pmbx = (uint8_t *)dmabuf->virt;
+		/* byte swap the extended data following the mailbox command */
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+	}
+
+	job = dd_data->context_un.mbox.set_job;
+	if (job) {
+		size = job->reply_payload.payload_len;
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmb_buf, size);
+		/* result for successful */
+		job->reply->result = 0;
+		job->dd_data = NULL;
+		/* need to hold the lock until we set job->dd_data to NULL
+		 * to hold off the timeout handler in the mid-layer from
+		 * taking any action.
+		 */
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2937 SLI_CONFIG ext-buffer mailbox command "
+				"(x%x/x%x) complete bsg job done, bsize:%d\n",
+				phba->mbox_ext_buf_ctx.nembType,
+				phba->mbox_ext_buf_ctx.mboxType, size);
+		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
+					phba->mbox_ext_buf_ctx.nembType,
+					phba->mbox_ext_buf_ctx.mboxType,
+					dma_ebuf, sta_pos_addr,
+					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
+	} else
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+job_done_out:
+	if (!job)
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2938 SLI_CONFIG ext-buffer mailbox "
+				"command (x%x/x%x) failure, rc:x%x\n",
+				phba->mbox_ext_buf_ctx.nembType,
+				phba->mbox_ext_buf_ctx.mboxType, rc);
+	/* state change */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
+	kfree(dd_data);
+
+	return job;
+}
+
+/**
+ * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox read commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct fc_bsg_job *job;
+
+	/* handle the BSG job with mailbox command */
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2939 SLI_CONFIG ext-buffer rd mailbox command "
+			"complete, ctxState:x%x, mbxStatus:x%x\n",
+			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
+		lpfc_bsg_mbox_ext_session_reset(phba);
+
+	/* free base driver mailbox structure memory */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+
+	/* complete the bsg job if we have it */
+	if (job)
+		job->job_done(job);
+
+	return;
+}
+
+/**
+ * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is completion handler function for mailbox write commands with multiple
+ * external buffers.
+ **/
+static void
+lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct fc_bsg_job *job;
+
+	/* handle the BSG job with the mailbox command */
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2940 SLI_CONFIG ext-buffer wr mailbox command "
+			"complete, ctxState:x%x, mbxStatus:x%x\n",
+			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
+
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
+	/* free all memory, including dma buffers */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_mbox_ext_session_reset(phba);
+
+	/* complete the bsg job if we have it */
+	if (job)
+		job->job_done(job);
+
+	return;
+}
+
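+/**
+ * lpfc_bsg_sli_cfg_dma_desc_setup - set up one SLI_CONFIG external buffer entry
+ * @phba: Pointer to HBA context object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @index: Index of the external buffer descriptor to set up.
+ * @mbx_dmabuf: Pointer to the mailbox command DMA buffer descriptor.
+ * @ext_dmabuf: Pointer to the external DMA buffer for this index.
+ *
+ * This routine fills in the physical address of one external buffer
+ * descriptor (MSE or HBD) in the SLI_CONFIG mailbox command. Descriptor 0
+ * points just past the MAILBOX_t header within the mailbox DMA buffer;
+ * higher indexes point to their own external DMA buffers.
+ **/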
+static void
+lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
+				struct lpfc_dmabuf *ext_dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		if (index == 0) {
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_hi =
+				putPaddrHigh(mbx_dmabuf->phys +
+					     sizeof(MAILBOX_t));
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_lo =
+				putPaddrLow(mbx_dmabuf->phys +
+					    sizeof(MAILBOX_t));
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2943 SLI_CONFIG(mse)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+					index,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].buf_len,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_hi,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_lo);
+		} else {
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_hi =
+				putPaddrHigh(ext_dmabuf->phys);
+			sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[index].pa_lo =
+				putPaddrLow(ext_dmabuf->phys);
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2944 SLI_CONFIG(mse)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+					index,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].buf_len,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_hi,
+					sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[index].pa_lo);
+		}
+	} else {
+		if (index == 0) {
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi =
+				putPaddrHigh(mbx_dmabuf->phys +
+					     sizeof(MAILBOX_t));
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo =
+				putPaddrLow(mbx_dmabuf->phys +
+					    sizeof(MAILBOX_t));
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3007 SLI_CONFIG(hbd)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+				index,
+				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.
+				sli_config_emb1_subsys.hbd[index]),
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi,
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo);
+
+		} else {
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi =
+				putPaddrHigh(ext_dmabuf->phys);
+			sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo =
+				putPaddrLow(ext_dmabuf->phys);
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"3008 SLI_CONFIG(hbd)[%d], "
+					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
+				index,
+				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.
+				sli_config_emb1_subsys.hbd[index]),
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_hi,
+				sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[index].pa_lo);
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			      enum nemb_type nemb_tp,
+			      struct lpfc_dmabuf *dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	struct dfc_mbox_req *mbox_req;
+	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+	uint32_t ext_buf_cnt, ext_buf_index;
+	struct lpfc_dmabuf *ext_dmabuf = NULL;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	uint8_t *pmbx;
+	int rc, i;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2945 Handled SLI_CONFIG(mse) rd, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_MSE);
+			rc = -ERANGE;
+			goto job_error;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2941 Handled SLI_CONFIG(mse) rd, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	} else {
+		/* sanity check on interface type for support */
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+		    LPFC_SLI_INTF_IF_TYPE_2) {
+			rc = -ENODEV;
+			goto job_error;
+		}
+		/* nemb_tp == nemb_hbd */
+		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2946 Handled SLI_CONFIG(hbd) rd, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_HBD);
+			rc = -ERANGE;
+			goto job_error;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2942 Handled SLI_CONFIG(hbd) rd, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	}
+
+	/* before dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+					sta_pre_addr, dmabuf, ext_buf_cnt);
+
+	/* reject a non-embedded mailbox command with no external buffer */
+	if (ext_buf_cnt == 0) {
+		rc = -EPERM;
+		goto job_error;
+	} else if (ext_buf_cnt > 1) {
+		/* additional external read buffers */
+		for (i = 1; i < ext_buf_cnt; i++) {
+			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+			if (!ext_dmabuf) {
+				rc = -ENOMEM;
+				goto job_error;
+			}
+			list_add_tail(&ext_dmabuf->list,
+				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+		}
+	}
+
+	/* bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+
+	/* mailbox command structure for base driver */
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	/* for the first external buffer */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+	/* for the rest of external buffer descriptors if any */
+	if (ext_buf_cnt > 1) {
+		ext_buf_index = 1;
+		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
+			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
+						ext_buf_index, dmabuf,
+						curr_dmabuf);
+			ext_buf_index++;
+		}
+	}
+
+	/* after dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+					sta_pos_addr, dmabuf, ext_buf_cnt);
+
+	/* construct base driver mbox command */
+	pmb = &pmboxq->u.mb;
+	pmbx = (uint8_t *)dmabuf->virt;
+	memcpy(pmb, pmbx, sizeof(*pmb));
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->vport = phba->pport;
+
+	/* multi-buffer handling context */
+	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
+	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+	/* callback for multi-buffer read mailbox command */
+	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
+
+	/* context fields to callback function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+	dd_data->context_un.mbox.set_job = job;
+	job->dd_data = dd_data;
+
+	/* state change */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+	/*
+	 * Non-embedded mailbox subcommand data gets byte swapped here because
+	 * the lower level driver code only does the first 64 mailbox words.
+	 */
+	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+		(nemb_tp == nemb_mse))
+		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+			&pmbx[sizeof(MAILBOX_t)],
+				sli_cfg_mbx->un.sli_config_emb0_subsys.
+					mse[0].buf_len);
+
+	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2947 Issued SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		return SLI_CONFIG_HANDLED;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"2948 Failed to issue SLI_CONFIG ext-buffer "
+			"mailbox command, rc:x%x\n", rc);
+	rc = -EPIPE;
+
+job_error:
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_list_free(phba,
+				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+	kfree(dd_data);
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @nemb_tp: Enumerate of non-embedded mailbox command type.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
+ * with non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			       enum nemb_type nemb_tp,
+			       struct lpfc_dmabuf *dmabuf)
+{
+	struct dfc_mbox_req *mbox_req;
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint32_t ext_buf_cnt;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	uint8_t *mbx;
+	int rc = SLI_CONFIG_NOT_HANDLED, i;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+	/* pointer to the start of mailbox command */
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (nemb_tp == nemb_mse) {
+		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2953 Failed SLI_CONFIG(mse) wr, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_MSE);
+			return -ERANGE;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2949 Handled SLI_CONFIG(mse) wr, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	} else {
+		/* sanity check on interface type for support */
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+		    LPFC_SLI_INTF_IF_TYPE_2)
+			return -ENODEV;
+		/* nemb_tp == nemb_hbd */
+		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
+		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2954 Failed SLI_CONFIG(hbd) wr, "
+					"ext_buf_cnt(%d) out of range(%d)\n",
+					ext_buf_cnt,
+					LPFC_MBX_SLI_CONFIG_MAX_HBD);
+			return -ERANGE;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2950 Handled SLI_CONFIG(hbd) wr, "
+				"ext_buf_cnt:%d\n", ext_buf_cnt);
+	}
+
+	/* before dma buffer descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+					sta_pre_addr, dmabuf, ext_buf_cnt);
+
+	if (ext_buf_cnt == 0)
+		return -EPERM;
+
+	/* for the first external buffer */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+
+	/* after dma descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+					sta_pos_addr, dmabuf, ext_buf_cnt);
+
+	/* log the length of each additional external buffer for reference */
+	for (i = 1; i < ext_buf_cnt; i++) {
+		if (nemb_tp == nemb_mse)
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
+				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
+				mse[i].buf_len);
+		else
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
+				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+				&sli_cfg_mbx->un.sli_config_emb1_subsys.
+				hbd[i]));
+	}
+
+	/* multi-buffer handling context */
+	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
+	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
+	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
+	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
+	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
+	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
+
+	if (ext_buf_cnt == 1) {
+		/* bsg tracking structure */
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (!dd_data) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+
+		/* mailbox command structure for base driver */
+		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!pmboxq) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		pmb = &pmboxq->u.mb;
+		mbx = (uint8_t *)dmabuf->virt;
+		memcpy(pmb, mbx, sizeof(*pmb));
+		pmb->mbxOwner = OWN_HOST;
+		pmboxq->vport = phba->pport;
+
+		/* callback for multi-buffer read mailbox command */
+		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+		/* context fields to callback function */
+		pmboxq->context1 = dd_data;
+		dd_data->type = TYPE_MBOX;
+		dd_data->context_un.mbox.pmboxq = pmboxq;
+		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
+		dd_data->context_un.mbox.set_job = job;
+		job->dd_data = dd_data;
+
+		/* state change */
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2955 Issued SLI_CONFIG ext-buffer "
+					"mailbox command, rc:x%x\n", rc);
+			return SLI_CONFIG_HANDLED;
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2956 Failed to issue SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		rc = -EPIPE;
+		goto job_error;
+	}
+
+	/* wait for additional external buffers */
+	job->reply->result = 0;
+	job->job_done(job);
+	return SLI_CONFIG_HANDLED;
+
+job_error:
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	kfree(dd_data);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
+ * external buffers, covering both 0x9B with non-embedded MSEs and 0x9B with
+ * embedded subsystem 0x1 and opcodes carrying external HBDs.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			     struct lpfc_dmabuf *dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	uint32_t subsys;
+	uint32_t opcode;
+	int rc = SLI_CONFIG_NOT_HANDLED;
+
+	/* state change on new multi-buffer pass-through mailbox command */
+	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
+
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+
+	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
+				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
+		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
+		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
+			switch (opcode) {
+			case FCOE_OPCODE_READ_FCF:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2957 Handled SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			case FCOE_OPCODE_ADD_FCF:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2958 Handled SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2959 Reject SLI_CONFIG "
+						"subsys_fcoe, opcode:x%x\n",
+						opcode);
+				rc = -EPERM;
+				break;
+			}
+		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+			switch (opcode) {
+			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3106 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_mse, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"3107 Reject SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = -EPERM;
+				break;
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2977 Reject SLI_CONFIG "
+					"subsys:x%d, opcode:x%x\n",
+					subsys, opcode);
+			rc = -EPERM;
+		}
+	} else {
+		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
+				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
+		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
+				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
+		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+			switch (opcode) {
+			case COMN_OPCODE_READ_OBJECT:
+			case COMN_OPCODE_READ_OBJECT_LIST:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2960 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+							nemb_hbd, dmabuf);
+				break;
+			case COMN_OPCODE_WRITE_OBJECT:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2961 Handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
+							nemb_hbd, dmabuf);
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+						"2962 Not handled SLI_CONFIG "
+						"subsys_comn, opcode:x%x\n",
+						opcode);
+				rc = SLI_CONFIG_NOT_HANDLED;
+				break;
+			}
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2978 Not handled SLI_CONFIG "
+					"subsys:x%d, opcode:x%x\n",
+					subsys, opcode);
+			rc = SLI_CONFIG_NOT_HANDLED;
+		}
+	}
+
+	/* state reset on not handled new multi-buffer mailbox command */
+	if (rc != SLI_CONFIG_HANDLED)
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine is for requesting to abort a pass-through mailbox command with
+ * multiple external buffers due to error condition.
+ **/
+static void
+lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
+{
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+	else
+		lpfc_bsg_mbox_ext_session_reset(phba);
+	return;
+}
+
+/**
+ * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ *
+ * This routine extracts the next mailbox read external buffer back to
+ * user space through BSG.
+ **/
+static int
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	struct lpfc_dmabuf *dmabuf;
+	uint8_t *pbuf;
+	uint32_t size;
+	uint32_t index;
+
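+	/* index of the next external read buffer to return in this session */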
+	index = phba->mbox_ext_buf_ctx.seqNum;
+	phba->mbox_ext_buf_ctx.seqNum++;
+
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
+			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2963 SLI_CONFIG (mse) ext-buffer rd get "
+				"buffer[%d], size:%d\n", index, size);
+	} else {
+		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
+			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
+				"buffer[%d], size:%d\n", index, size);
+	}
+	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
+		return -EPIPE;
+	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+				  struct lpfc_dmabuf, list);
+	list_del_init(&dmabuf->list);
+
+	/* after dma buffer descriptor setup */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+					mbox_rd, dma_ebuf, sta_pos_addr,
+					dmabuf, index);
+
+	pbuf = (uint8_t *)dmabuf->virt;
+	job->reply->reply_payload_rcv_len =
+		sg_copy_from_buffer(job->reply_payload.sg_list,
+				    job->reply_payload.sg_cnt,
+				    pbuf, size);
+
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+
+	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
+				"command session done\n");
+		lpfc_bsg_mbox_ext_session_reset(phba);
+	}
+
+	job->reply->result = 0;
+	job->job_done(job);
+
+	return SLI_CONFIG_HANDLED;
+}
+
+/**
+ * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine sets up the next mailbox write external buffer obtained
+ * from user space through BSG.
+ **/
+static int
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			struct lpfc_dmabuf *dmabuf)
+{
+	struct lpfc_sli_config_mbox *sli_cfg_mbx;
+	struct bsg_job_data *dd_data = NULL;
+	LPFC_MBOXQ_t *pmboxq = NULL;
+	MAILBOX_t *pmb;
+	enum nemb_type nemb_tp;
+	uint8_t *pbuf;
+	uint32_t size;
+	uint32_t index;
+	int rc;
+
+	index = phba->mbox_ext_buf_ctx.seqNum;
+	phba->mbox_ext_buf_ctx.seqNum++;
+	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
+
+	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
+			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		rc = -ENOMEM;
+		goto job_error;
+	}
+
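+	/* copy the user payload for this external buffer into the DMA page */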
+	pbuf = (uint8_t *)dmabuf->virt;
+	size = job->request_payload.payload_len;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  pbuf, size);
+
+	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2966 SLI_CONFIG (mse) ext-buffer wr set "
+				"buffer[%d], size:%d\n",
+				phba->mbox_ext_buf_ctx.seqNum, size);
+
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
+				"buffer[%d], size:%d\n",
+				phba->mbox_ext_buf_ctx.seqNum, size);
+
+	}
+
+	/* set up external buffer descriptor and add to external buffer list */
+	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
+					phba->mbox_ext_buf_ctx.mbx_dmabuf,
+					dmabuf);
+	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	/* after write dma buffer */
+	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+					mbox_wr, dma_ebuf, sta_pos_addr,
+					dmabuf, index);
+
+	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2968 SLI_CONFIG ext-buffer wr all %d "
+				"ebuffers received\n",
+				phba->mbox_ext_buf_ctx.numBuf);
+		/* mailbox command structure for base driver */
+		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!pmboxq) {
+			rc = -ENOMEM;
+			goto job_error;
+		}
+		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
+		pmb = &pmboxq->u.mb;
+		memcpy(pmb, pbuf, sizeof(*pmb));
+		pmb->mbxOwner = OWN_HOST;
+		pmboxq->vport = phba->pport;
+
+		/* callback for multi-buffer write mailbox command */
+		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
+
+		/* context fields to callback function */
+		pmboxq->context1 = dd_data;
+		dd_data->type = TYPE_MBOX;
+		dd_data->context_un.mbox.pmboxq = pmboxq;
+		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
+		dd_data->context_un.mbox.set_job = job;
+		job->dd_data = dd_data;
+
+		/* state change */
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2969 Issued SLI_CONFIG ext-buffer "
+					"mailbox command, rc:x%x\n", rc);
+			return SLI_CONFIG_HANDLED;
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+				"2970 Failed to issue SLI_CONFIG ext-buffer "
+				"mailbox command, rc:x%x\n", rc);
+		rc = -EPIPE;
+		goto job_error;
+	}
+
+	/* wait for additional external buffers */
+	job->reply->result = 0;
+	job->job_done(job);
+	return SLI_CONFIG_HANDLED;
+
+job_error:
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+	kfree(dd_data);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
+ * command with multiple non-embedded external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			     struct lpfc_dmabuf *dmabuf)
+{
+	int rc;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2971 SLI_CONFIG buffer (type:x%x)\n",
+			phba->mbox_ext_buf_ctx.mboxType);
+
+	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
+		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2972 SLI_CONFIG rd buffer state "
+					"mismatch:x%x\n",
+					phba->mbox_ext_buf_ctx.state);
+			lpfc_bsg_mbox_ext_abort(phba);
+			return -EPIPE;
+		}
+		rc = lpfc_bsg_read_ebuf_get(phba, job);
+		if (rc == SLI_CONFIG_HANDLED)
+			lpfc_bsg_dma_page_free(phba, dmabuf);
+	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
+		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+					"2973 SLI_CONFIG wr buffer state "
+					"mismatch:x%x\n",
+					phba->mbox_ext_buf_ctx.state);
+			lpfc_bsg_mbox_ext_abort(phba);
+			return -EPIPE;
+		}
+		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
+ * (0x9B) mailbox commands and external buffers.
+ **/
+static int
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+			    struct lpfc_dmabuf *dmabuf)
+{
+	struct dfc_mbox_req *mbox_req;
+	int rc = SLI_CONFIG_NOT_HANDLED;
+
+	mbox_req =
+	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+	/* mbox command with/without single external buffer */
+	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
+		return rc;
+
+	/* mbox command and first external buffer */
+	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
+		if (mbox_req->extSeqNum == 1) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+					"2974 SLI_CONFIG mailbox: tag:%d, "
+					"seq:%d\n", mbox_req->extMboxTag,
+					mbox_req->extSeqNum);
+			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
+			return rc;
+		} else
+			goto sli_cfg_ext_error;
+	}
+
+	/*
+	 * handle additional external buffers
+	 */
+
+	/* check broken pipe conditions */
+	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
+		goto sli_cfg_ext_error;
+	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
+		goto sli_cfg_ext_error;
+	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
+		goto sli_cfg_ext_error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+			"2975 SLI_CONFIG mailbox external buffer: "
+			"extSta:x%x, tag:%d, seq:%d\n",
+			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
+			mbox_req->extSeqNum);
+	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
+	return rc;
+
+sli_cfg_ext_error:
+	/* all other cases, broken pipe */
+	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+			"2976 SLI_CONFIG mailbox broken pipe: "
+			"ctxSta:x%x, ctxNumBuf:%d "
+			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
+			phba->mbox_ext_buf_ctx.state,
+			phba->mbox_ext_buf_ctx.numBuf,
+			phba->mbox_ext_buf_ctx.mbxTag,
+			phba->mbox_ext_buf_ctx.seqNum,
+			mbox_req->extMboxTag, mbox_req->extSeqNum);
+
+	lpfc_bsg_mbox_ext_session_reset(phba);
+
+	return -EPIPE;
+}
+
+/**
+ * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job carrying the mailbox command.
+ * @vport: Pointer to a vport object.
+ *
+ * Allocate a tracking object, mailbox command memory, get a mailbox
+ * from the mailbox pool, copy the caller mailbox command.
+ *
+ * If the port is offline or the SLI is not active, we must poll for the
+ * command (the port may be being reset) and complete the job inline;
+ * otherwise issue the mailbox command and let our completion handler
+ * finish the command.
+ **/
+static uint32_t
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+	struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+	/* a 4k buffer to hold the mb and extended data from/to the bsg */
+	uint8_t *pmbx = NULL;
+	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
+	struct lpfc_dmabuf *dmabuf = NULL;
+	struct dfc_mbox_req *mbox_req;
+	struct READ_EVENT_LOG_VAR *rdEventLog;
+	uint32_t transmit_length, receive_length, mode;
+	struct lpfc_mbx_sli4_config *sli4_config;
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+	struct mbox_header *header;
+	struct ulp_bde64 *bde;
+	uint8_t *ext = NULL;
+	int rc = 0;
+	uint8_t *from;
+	uint32_t size;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	/* sanity check to protect driver */
+	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
+	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
+		rc = -ERANGE;
+		goto job_done;
+	}
+
+	/*
+	 * Don't allow mailbox commands to be sent when blocked or when in
+	 * the middle of discovery
+	 */
+	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+		rc = -EAGAIN;
+		goto job_done;
+	}
+
+	mbox_req =
+	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+
+	/* check if requested extended data lengths are valid */
+	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
+	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
+		rc = -ERANGE;
+		goto job_done;
+	}
+
+	dmabuf = lpfc_bsg_dma_page_alloc(phba);
+	if (!dmabuf || !dmabuf->virt) {
+		rc = -ENOMEM;
+		goto job_done;
+	}
+
+	/* Get the mailbox command or external buffer from BSG */
+	pmbx = (uint8_t *)dmabuf->virt;
+	size = job->request_payload.payload_len;
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt, pmbx, size);
+
+	/* Handle possible SLI_CONFIG with non-embedded payloads */
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
+		if (rc == SLI_CONFIG_HANDLED)
+			goto job_cont;
+		if (rc)
+			goto job_done;
+		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
+	}
+
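+	/* verify the mailbox command is permitted in the current port state */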
+	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
+	if (rc != 0)
+		goto job_done; /* must be negative */
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2727 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto job_done;
+	}
+
+	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmboxq) {
+		rc = -ENOMEM;
+		goto job_done;
+	}
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+	pmb = &pmboxq->u.mb;
+	memcpy(pmb, pmbx, sizeof(*pmb));
+	pmb->mbxOwner = OWN_HOST;
+	pmboxq->vport = vport;
+
+	/* If HBA encountered an error attention, allow only DUMP
+	 * or RESTART mailbox commands until the HBA is restarted.
+	 */
+	if (phba->pport->stopped &&
+	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
+	    pmb->mbxCommand != MBX_RESTART &&
+	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
+	    pmb->mbxCommand != MBX_WRITE_WWN)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"2797 mbox: Issued mailbox cmd "
+				"0x%x while in stopped state.\n",
+				pmb->mbxCommand);
+
+	/* extended mailbox commands will need an extended buffer */
+	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
+		from = pmbx;
+		ext = from + sizeof(MAILBOX_t);
+		pmboxq->context2 = ext;
+		pmboxq->in_ext_byte_len =
+			mbox_req->inExtWLen * sizeof(uint32_t);
+		pmboxq->out_ext_byte_len =
+			mbox_req->outExtWLen * sizeof(uint32_t);
+		pmboxq->mbox_offset_word = mbox_req->mbOffset;
+	}
+
+	/* BIU diag will need a kernel buffer to transfer the data;
+	 * allocate our own buffer and set up the mailbox command to
+	 * use ours.
+	 */
+	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+		transmit_length = pmb->un.varWords[1];
+		receive_length = pmb->un.varWords[4];
+		/* transmit length cannot be greater than receive length or
+		 * mailbox extension size
+		 */
+		if ((transmit_length > receive_length) ||
+			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+			rc = -ERANGE;
+			goto job_done;
+		}
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
+
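+		/* receive buffer follows the transmit data in the DMA page */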
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
+			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
+			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
+	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
+		rdEventLog = &pmb->un.varRdEventLog;
+		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
+		mode = bf_get(lpfc_event_log, rdEventLog);
+
+		/* receive length cannot be greater than mailbox
+		 * extension size
+		 */
+		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+			rc = -ERANGE;
+			goto job_done;
+		}
+
+		/* mode zero uses a bde like biu diags command */
+		if (mode == 0) {
+			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+							+ sizeof(MAILBOX_t));
+			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+							+ sizeof(MAILBOX_t));
+		}
+	} else if (phba->sli_rev == LPFC_SLI_REV4) {
+		/* Let type 4 (well known data) through because the data is
+		 * returned in varWords[4-8]; otherwise check the receive
+		 * length and fetch the buffer address.
+		 */
+		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
+			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
+			/* rebuild the command for sli4 using our own buffers
+			 * like we do for biu diags
+			 */
+			receive_length = pmb->un.varWords[2];
+			/* receive length cannot be greater than mailbox
+			 * extension size
+			 */
+			if (receive_length == 0) {
+				rc = -ERANGE;
+				goto job_done;
+			}
+			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
+			pmb->un.varUpdateCfg.co) {
+			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
+
+			/* bde size cannot be greater than mailbox ext size */
+			if (bde->tus.f.bdeSize >
+			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
+				rc = -ERANGE;
+				goto job_done;
+			}
+			bde->addrHigh = putPaddrHigh(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+			bde->addrLow = putPaddrLow(dmabuf->phys
+						+ sizeof(MAILBOX_t));
+		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
+			/* Handling non-embedded SLI_CONFIG mailbox command */
+			sli4_config = &pmboxq->u.mqe.un.sli4_config;
+			if (!bf_get(lpfc_mbox_hdr_emb,
+			    &sli4_config->header.cfg_mhdr)) {
+				/* rebuild the command for sli4 using our
+				 * own buffers like we do for biu diags
+				 */
+				header = (struct mbox_header *)
+						&pmb->un.varWords[0];
+				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+						&pmb->un.varWords[0];
+				receive_length = nembed_sge->sge[0].length;
+
+				/* receive length cannot be greater than
+				 * mailbox extension size
+				 */
+				if ((receive_length == 0) ||
+				    (receive_length >
+				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
+					rc = -ERANGE;
+					goto job_done;
+				}
+
+				nembed_sge->sge[0].pa_hi =
+						putPaddrHigh(dmabuf->phys
+						   + sizeof(MAILBOX_t));
+				nembed_sge->sge[0].pa_lo =
+						putPaddrLow(dmabuf->phys
+						   + sizeof(MAILBOX_t));
+			}
+		}
+	}
+
+	dd_data->context_un.mbox.dmabuffers = dmabuf;
+
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
+
+	/* setup context field to pass wait_queue pointer to wake function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
+	dd_data->context_un.mbox.set_job = job;
+	dd_data->context_un.mbox.ext = ext;
+	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
+	job->dd_data = dd_data;
+
+	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+			goto job_done;
+		}
+
+		/* job finished, copy the data */
+		memcpy(pmbx, pmb, sizeof(*pmb));
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    pmbx, size);
+		/* not waiting; mailbox already completed */
+		rc = 0;
+		goto job_done;
+	}
+
+	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+		return 1; /* job started */
+
+job_done:
+	/* common exit for error or job completed inline */
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	lpfc_bsg_dma_page_free(phba, dmabuf);
+	kfree(dd_data);
+
+job_cont:
+	return rc;
+}
+
+/**
+ * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
+ * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
+ **/
+static int
+lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct dfc_mbox_req *mbox_req;
+	int rc = 0;
+
+	/* mix-and-match backward compatibility */
+	job->reply->reply_payload_rcv_len = 0;
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+				"2737 Mix-and-match backward compatibility "
+				"between MBOX_REQ old size:%d and "
+				"new request size:%d\n",
+				(int)(job->request_len -
+				      sizeof(struct fc_bsg_request)),
+				(int)sizeof(struct dfc_mbox_req));
+		mbox_req = (struct dfc_mbox_req *)
+				job->request->rqst_data.h_vendor.vendor_cmd;
+		mbox_req->extMboxTag = 0;
+		mbox_req->extSeqNum = 0;
+	}
+
+	rc = lpfc_bsg_issue_mbox(phba, job, vport);
+
+	if (rc == 0) {
+		/* job done */
+		job->reply->result = 0;
+		job->dd_data = NULL;
+		job->job_done(job);
+	} else if (rc == 1)
+		/* job submitted, will complete later */
+		rc = 0; /* return zero, no error */
+	else {
+		/* some error occurred */
+		job->reply->result = rc;
+		job->dd_data = NULL;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_bsg_menlo *menlo;
+	unsigned long flags;
+	struct menlo_response *menlo_resp;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	menlo = &dd_data->context_un.menlo;
+	job = menlo->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
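+	/* mark the iocb complete and save the response under hbalock */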
+	spin_lock(&phba->hbalock);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock(&phba->hbalock);
+
+	bmp = menlo->bmp;
+	rspiocbq = menlo->rspiocbq;
+	rsp = &rspiocbq->iocb;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	/* always return the xri, this would be used in the case
+	 * of a menlo download to allow the data to be sent as a continuation
+	 * of the exchange.
+	 */
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+	menlo_resp->xri = rsp->ulpContext;
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
+ * all the command completions will return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the exchange
+ * supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	int rc = 0;
+	struct menlo_command *menlo_cmd;
+	struct menlo_response *menlo_resp;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	struct ulp_bde64 *bpl = NULL;
+
+	/* in case no data is returned, just return the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+		sizeof(struct menlo_command)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2784 Received MENLO_CMD request below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2785 Received MENLO_CMD reply below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2786 Adapter does not support menlo "
+				"commands\n");
+		rc = -EPERM;
+		goto no_dd_data;
+	}
+
+	menlo_cmd = (struct menlo_command *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2787 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_dd;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+
+	rsp = &rspiocbq->iocb;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_rspiocbq;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
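+	/* build the BPL: request payload BDEs first, then reply BDEs */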
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpLe = 1; /* Limited Edition */
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	/* We want the firmware to timeout before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
+	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+		cmd->ulpPU = MENLO_PU; /* 3 */
+		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	} else {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+		cmd->ulpPU = 1;
+		cmd->un.ulpWord[4] = 0;
+		cmd->ulpContext = menlo_cmd->xri;
+	}
+
+	dd_data->type = TYPE_MENLO;
+	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+	dd_data->context_un.menlo.rspiocbq = rspiocbq;
+	dd_data->context_un.menlo.set_job = job;
+	dd_data->context_un.menlo.bmp = bmp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+		MENLO_TIMEOUT - 5);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+	kfree(bmp);
+free_dd:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+
+/**
+ * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
+ * @job: fc_bsg_job to handle
+ **/
+static int
+lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+{
+	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+	int rc;
+
+	switch (command) {
+	case LPFC_BSG_VENDOR_SET_CT_EVENT:
+		rc = lpfc_bsg_hba_set_event(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_CT_EVENT:
+		rc = lpfc_bsg_hba_get_event(job);
+		break;
+	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
+		rc = lpfc_bsg_send_mgmt_rsp(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_MODE:
+		rc = lpfc_bsg_diag_loopback_mode(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_MODE_END:
+		rc = lpfc_sli4_bsg_diag_mode_end(job);
+		break;
+	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
+		rc = lpfc_bsg_diag_loopback_run(job);
+		break;
+	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
+		rc = lpfc_sli4_bsg_link_diag_test(job);
+		break;
+	case LPFC_BSG_VENDOR_GET_MGMT_REV:
+		rc = lpfc_bsg_get_dfc_rev(job);
+		break;
+	case LPFC_BSG_VENDOR_MBOX:
+		rc = lpfc_bsg_mbox_cmd(job);
+		break;
+	case LPFC_BSG_VENDOR_MENLO_CMD:
+	case LPFC_BSG_VENDOR_MENLO_DATA:
+		rc = lpfc_menlo_cmd(job);
+		break;
+	default:
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_request - handle a bsg request from the FC transport
+ * @job: fc_bsg_job to handle
+ **/
+int
+lpfc_bsg_request(struct fc_bsg_job *job)
+{
+	uint32_t msgcode;
+	int rc;
+
+	msgcode = job->request->msgcode;
+	switch (msgcode) {
+	case FC_BSG_HST_VENDOR:
+		rc = lpfc_bsg_hst_vendor(job);
+		break;
+	case FC_BSG_RPT_ELS:
+		rc = lpfc_bsg_rport_els(job);
+		break;
+	case FC_BSG_RPT_CT:
+		rc = lpfc_bsg_send_mgmt_cmd(job);
+		break;
+	default:
+		rc = -EINVAL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* make error code available to userspace */
+		job->reply->result = rc;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
+ * @job: fc_bsg_job that has timed out
+ *
+ * This function just aborts the job's IOCB.  The aborted IOCB will return to
+ * the waiting function, which will handle passing the error back to userspace.
+ **/
+int
+lpfc_bsg_timeout(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_iocb *iocb;
+	struct lpfc_bsg_mbox *mbox;
+	struct lpfc_bsg_menlo *menlo;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct bsg_job_data *dd_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = (struct bsg_job_data *)job->dd_data;
+	/* timeout and completion crossed paths if no dd_data */
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return 0;
+	}
+
+	switch (dd_data->type) {
+	case TYPE_IOCB:
+		iocb = &dd_data->context_un.iocb;
+		cmdiocb = iocb->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
+	case TYPE_EVT:
+		evt = dd_data->context_un.evt;
+		/* this event has no job anymore */
+		evt->set_job = NULL;
+		job->dd_data = NULL;
+		job->reply->reply_payload_rcv_len = 0;
+		/* Return -EAGAIN, which is our way of signalling the
+		 * app to retry.
+		 */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job->job_done(job);
+		break;
+	case TYPE_MBOX:
+		mbox = &dd_data->context_un.mbox;
+		/* this mbox has no job anymore */
+		mbox->set_job = NULL;
+		job->dd_data = NULL;
+		job->reply->reply_payload_rcv_len = 0;
+		job->reply->result = -EAGAIN;
+		/* the mbox completion handler can now be run */
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		job->job_done(job);
+		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
+			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+		break;
+	case TYPE_MENLO:
+		menlo = &dd_data->context_un.menlo;
+		cmdiocb = menlo->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
+	default:
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		break;
+	}
+
+	/* scsi transport fc fc_bsg_job_timeout expects a zero return code;
+	 * otherwise an error message will be displayed on the console,
+	 * so always return success (zero).
+	 */
+	return 0;
+}
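+
+/*
+ * Wiring sketch (illustrative only): the two entry points above are expected
+ * to be hooked into the driver's fc_function_template so the FC transport
+ * routes bsg traffic here; field names assume the standard scsi_transport_fc
+ * template:
+ *
+ *	struct fc_function_template lpfc_transport_functions = {
+ *		...
+ *		.bsg_request	= lpfc_bsg_request,
+ *		.bsg_timeout	= lpfc_bsg_timeout,
+ *	};
+ */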
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 0000000..edfe61f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,285 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2010 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+/* bsg definitions
+ * No pointers to user data are allowed; all application buffers and sizes
+ * will be derived through the bsg interface.
+ *
+ * These are the vendor unique structures passed in using the bsg
+ * FC_BSG_HST_VENDOR message code type.
+ */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT		1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT		2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP		3
+#define LPFC_BSG_VENDOR_DIAG_MODE		4
+#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK	5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV		6
+#define LPFC_BSG_VENDOR_MBOX			7
+#define LPFC_BSG_VENDOR_MENLO_CMD		8
+#define LPFC_BSG_VENDOR_MENLO_DATA		9
+#define LPFC_BSG_VENDOR_DIAG_MODE_END		10
+#define LPFC_BSG_VENDOR_LINK_DIAG_TEST		11
+
+struct set_ct_event {
+	uint32_t command;
+	uint32_t type_mask;
+	uint32_t ev_req_id;
+	uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+	uint32_t command;
+	uint32_t ev_reg_id;
+	uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+	uint32_t immed_data;
+	uint32_t type;
+};
+
+struct send_mgmt_resp {
+	uint32_t command;
+	uint32_t tag;
+};
+
+
+#define INTERNAL_LOOP_BACK 0x1 /* adapter short cuts the loop internally */
+#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
+
+struct diag_mode_set {
+	uint32_t command;
+	uint32_t type;
+	uint32_t timeout;
+};
+
+struct sli4_link_diag {
+	uint32_t command;
+	uint32_t timeout;
+	uint32_t test_id;
+	uint32_t loops;
+	uint32_t test_version;
+	uint32_t error_action;
+};
+
+struct diag_mode_test {
+	uint32_t command;
+};
+
+struct diag_status {
+	uint32_t mbox_status;
+	uint32_t shdr_status;
+	uint32_t shdr_add_status;
+};
+
+#define LPFC_WWNN_TYPE		0
+#define LPFC_WWPN_TYPE		1
+
+struct get_mgmt_rev {
+	uint32_t command;
+};
+
+#define MANAGEMENT_MAJOR_REV   1
+#define MANAGEMENT_MINOR_REV   1
+
+/* the MgmtRevInfo structure */
+struct MgmtRevInfo {
+	uint32_t a_Major;
+	uint32_t a_Minor;
+};
+
+struct get_mgmt_rev_reply {
+	struct MgmtRevInfo info;
+};
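+
+/*
+ * Illustrative request/reply pairing (a sketch, not normative): the vendor
+ * command value goes in the first word of the FC_BSG_HST_VENDOR request
+ * payload, which is how lpfc_bsg_hst_vendor() dispatches it, e.g.:
+ *
+ *	struct get_mgmt_rev req = { .command = LPFC_BSG_VENDOR_GET_MGMT_REV };
+ *	struct get_mgmt_rev_reply rep;
+ *
+ *	req is placed in rqst_data.h_vendor.vendor_cmd[] of the bsg request;
+ *	rep comes back in reply_data.vendor_reply.vendor_rsp[] of the reply.
+ */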
+
+#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */
+
+/* BSG mailbox request header */
+struct dfc_mbox_req {
+	uint32_t command;
+	uint32_t mbOffset;
+	uint32_t inExtWLen;
+	uint32_t outExtWLen;
+	uint32_t extMboxTag;
+	uint32_t extSeqNum;
+};
+
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+	uint32_t cmd;
+	uint32_t xri;
+};
+
+struct menlo_response {
+	uint32_t xri; /* return the xri of the iocb exchange */
+};
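+
+/*
+ * Illustrative sequencing (a sketch): a menlo exchange is started with
+ * LPFC_BSG_VENDOR_MENLO_CMD and continued with LPFC_BSG_VENDOR_MENLO_DATA
+ * carrying the xri returned in the previous menlo_response, e.g.:
+ *
+ *	struct menlo_response first_rsp;  (filled in by the MENLO_CMD reply)
+ *	struct menlo_command data_cmd = {
+ *		.cmd = LPFC_BSG_VENDOR_MENLO_DATA,
+ *		.xri = first_rsp.xri,
+ *	};
+ */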
+
+/*
+ * Macros and data structures for handling sli-config mailbox command
+ * pass-through support.  This header file is shared between user and
+ * kernel spaces.  Note that the set of macros below duplicates those in
+ * lpfc_hw4.h, with the macro names prefixed with bsg_, because the macros
+ * defined in lpfc_hw4.h are not accessible from user space.
+ */
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ *      struct temp {
+ *              uint32_t        field1;
+ *              uint32_t        field2;
+ *              uint32_t        field3;
+ *              uint32_t        field4;
+ *      #define example_bit_field_SHIFT         7
+ *      #define example_bit_field_MASK          0x03
+ *      #define example_bit_field_WORD          field4
+ *              uint32_t        field5;
+ *      };
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ *      struct temp t1;
+ *      value = bsg_bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *      bsg_bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *      bsg_bf_set(example_bit_field, &t1, 0);
+ */
+#define bsg_bf_get_le32(name, ptr) \
+	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bsg_bf_set_le32(name, ptr, value) \
+	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+	~(name##_MASK << name##_SHIFT)))))
+#define bsg_bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+/*
+ * The sli_config structure specified here is based on the following
+ * restriction:
+ *
+ * -- SLI_CONFIG EMB=0, carrying MSEs, will carry subcommands without
+ *    carrying HBD.
+ * -- SLI_CONFIG EMB=1, not carrying MSE, will carry subcommands with or
+ *    without carrying HBDs.
+ */
+
+struct lpfc_sli_config_mse {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t buf_len;
+#define lpfc_mbox_sli_config_mse_len_SHIFT	0
+#define lpfc_mbox_sli_config_mse_len_MASK	0xffffff
+#define lpfc_mbox_sli_config_mse_len_WORD	buf_len
+};
+
+struct lpfc_sli_config_hbd {
+	uint32_t buf_len;
+#define lpfc_mbox_sli_config_ecmn_hbd_len_SHIFT	0
+#define lpfc_mbox_sli_config_ecmn_hbd_len_MASK	0xffffff
+#define lpfc_mbox_sli_config_ecmn_hbd_len_WORD	buf_len
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+};
+
+struct lpfc_sli_config_hdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_mse_cnt_SHIFT	3
+#define lpfc_mbox_hdr_mse_cnt_MASK	0x0000001f
+#define lpfc_mbox_hdr_mse_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
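+
+/*
+ * Accessor usage sketch (illustrative): the bsg_bf_* macros above operate on
+ * the _SHIFT/_MASK/_WORD triplets defined for this header, e.g.:
+ *
+ *	struct lpfc_sli_config_hdr hdr;
+ *	uint32_t mse_cnt;
+ *
+ *	bsg_bf_set(lpfc_mbox_hdr_emb, &hdr, 0);
+ *	if (!bsg_bf_get(lpfc_mbox_hdr_emb, &hdr))	(EMB=0: MSEs present)
+ *		mse_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, &hdr);
+ */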
+
+struct lpfc_sli_config_emb0_subsys {
+	struct lpfc_sli_config_hdr	sli_config_hdr;
+#define LPFC_MBX_SLI_CONFIG_MAX_MSE     19
+	struct lpfc_sli_config_mse	mse[LPFC_MBX_SLI_CONFIG_MAX_MSE];
+	uint32_t padding;
+	uint32_t word64;
+#define lpfc_emb0_subcmnd_opcode_SHIFT	0
+#define lpfc_emb0_subcmnd_opcode_MASK	0xff
+#define lpfc_emb0_subcmnd_opcode_WORD	word64
+#define lpfc_emb0_subcmnd_subsys_SHIFT	8
+#define lpfc_emb0_subcmnd_subsys_MASK	0xff
+#define lpfc_emb0_subcmnd_subsys_WORD	word64
+/* Subsystem FCOE (0x0C) OpCodes */
+#define SLI_CONFIG_SUBSYS_FCOE		0x0C
+#define FCOE_OPCODE_READ_FCF		0x08
+#define FCOE_OPCODE_ADD_FCF		0x09
+};
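+
+/*
+ * Subcommand selection sketch (illustrative): an EMB=0 FCOE request sets the
+ * subsystem and opcode fields of word64 with the same accessors, e.g.:
+ *
+ *	struct lpfc_sli_config_emb0_subsys emb0;
+ *
+ *	bsg_bf_set(lpfc_emb0_subcmnd_subsys, &emb0, SLI_CONFIG_SUBSYS_FCOE);
+ *	bsg_bf_set(lpfc_emb0_subcmnd_opcode, &emb0, FCOE_OPCODE_READ_FCF);
+ */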
+
+struct lpfc_sli_config_emb1_subsys {
+	struct lpfc_sli_config_hdr	sli_config_hdr;
+	uint32_t word6;
+#define lpfc_emb1_subcmnd_opcode_SHIFT	0
+#define lpfc_emb1_subcmnd_opcode_MASK	0xff
+#define lpfc_emb1_subcmnd_opcode_WORD	word6
+#define lpfc_emb1_subcmnd_subsys_SHIFT	8
+#define lpfc_emb1_subcmnd_subsys_MASK	0xff
+#define lpfc_emb1_subcmnd_subsys_WORD	word6
+/* Subsystem COMN (0x01) OpCodes */
+#define SLI_CONFIG_SUBSYS_COMN		0x01
+#define COMN_OPCODE_READ_OBJECT		0xAB
+#define COMN_OPCODE_WRITE_OBJECT	0xAC
+#define COMN_OPCODE_READ_OBJECT_LIST	0xAD
+#define COMN_OPCODE_DELETE_OBJECT	0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES	0x79
+	uint32_t timeout;
+	uint32_t request_length;
+	uint32_t word9;
+#define lpfc_subcmnd_version_SHIFT	0
+#define lpfc_subcmnd_version_MASK	0xff
+#define lpfc_subcmnd_version_WORD	word9
+	uint32_t word10;
+#define lpfc_subcmnd_ask_rd_len_SHIFT	0
+#define lpfc_subcmnd_ask_rd_len_MASK	0xffffff
+#define lpfc_subcmnd_ask_rd_len_WORD	word10
+	uint32_t rd_offset;
+	uint32_t obj_name[26];
+	uint32_t hbd_count;
+#define LPFC_MBX_SLI_CONFIG_MAX_HBD	8
+	struct lpfc_sli_config_hbd	hbd[LPFC_MBX_SLI_CONFIG_MAX_HBD];
+};
+
+struct lpfc_sli_config_mbox {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		struct lpfc_sli_config_emb0_subsys sli_config_emb0_subsys;
+		struct lpfc_sli_config_emb1_subsys sli_config_emb1_subsys;
+	} un;
+};
+
+/* driver only */
+#define SLI_CONFIG_NOT_HANDLED		0
+#define SLI_CONFIG_HANDLED		1
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_compat.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_compat.h
new file mode 100644
index 0000000..c88e556
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_compat.h
@@ -0,0 +1,96 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/*
+ * This file provides macros to aid compilation in the Linux kernel
+ * over various platform architectures.
+ */
+
+/*******************************************************************
+Note: the HBA's SLI memory contains little-endian longwords (LW).
+Thus, to access it from a little-endian host,
+memcpy_toio() and memcpy_fromio() can be used.
+However, on a big-endian host, copy 4 bytes at a time
+using writel() and readl().
+ *******************************************************************/
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+	uint32_t __iomem *dest32;
+	uint32_t *src32;
+	unsigned int four_bytes;
+
+
+	dest32  = (uint32_t __iomem *) dest;
+	src32  = (uint32_t *) src;
+
+	/* write input bytes, 4 bytes at a time */
+	for (four_bytes = bytes / 4; four_bytes > 0; four_bytes--) {
+		writel(*src32, dest32);
+		readl(dest32); /* flush */
+		dest32++;
+		src32++;
+	}
+
+	return;
+}
+
+static inline void
+lpfc_memcpy_from_slim(void *dest, void __iomem *src, unsigned int bytes)
+{
+	uint32_t *dest32;
+	uint32_t __iomem *src32;
+	unsigned int four_bytes;
+
+
+	dest32  = (uint32_t *) dest;
+	src32  = (uint32_t __iomem *) src;
+
+	/* read input bytes, 4 bytes at a time */
+	for (four_bytes = bytes / 4; four_bytes > 0; four_bytes--) {
+		*dest32 = readl(src32);
+		dest32++;
+		src32++;
+	}
+
+	return;
+}
+
+#else
+
+static inline void
+lpfc_memcpy_to_slim(void __iomem *dest, void *src, unsigned int bytes)
+{
+	/* convert bytes in argument list to word count for copy function */
+	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
+}
+
+static inline void
+lpfc_memcpy_from_slim(void *dest, void __iomem *src, unsigned int bytes)
+{
+	/* actually returns 1 byte past dest */
+	memcpy_fromio(dest, src, bytes);
+}
+
+#endif	/* __BIG_ENDIAN */
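+
+/*
+ * Usage sketch (illustrative): both variants copy whole 32-bit words, so
+ * callers pass a byte count that is a multiple of 4; slim_addr below is a
+ * hypothetical void __iomem * pointing into SLIM:
+ *
+ *	uint32_t words[4];
+ *
+ *	lpfc_memcpy_from_slim(words, slim_addr, sizeof(words));
+ *	lpfc_memcpy_to_slim(slim_addr, words, sizeof(words));
+ */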
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_crtn.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_crtn.h
new file mode 100644
index 0000000..330dd71
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_crtn.h
@@ -0,0 +1,465 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);
+
+struct fc_rport;
+void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli_read_link_ste(struct lpfc_hba *);
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
+void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+		 LPFC_MBOXQ_t *, uint16_t);
+void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+			struct lpfc_nodelist *);
+void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_supported_pages(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
+int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
+			   uint16_t, uint16_t, bool);
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
+struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
+void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
+void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
+void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
+int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
+void lpfc_port_link_failure(struct lpfc_vport *);
+void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
+void lpfc_retry_pport_discovery(struct lpfc_hba *);
+void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+
+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
+					struct lpfc_nodelist *, int);
+void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_set_disctmo(struct lpfc_vport *);
+int  lpfc_can_disctmo(struct lpfc_vport *);
+int  lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_hba_rpis(struct lpfc_hba *);
+void lpfc_unreg_default_rpis(struct lpfc_vport *);
+void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
+
+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
+			struct lpfc_iocbq *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
+int  lpfc_nlp_put(struct lpfc_nodelist *);
+int  lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_vport *);
+void lpfc_disc_start(struct lpfc_vport *);
+void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
+void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_disc_timeout(unsigned long);
+
+struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+void lpfc_worker_wake_up(struct lpfc_hba *);
+int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+int lpfc_do_work(void *);
+int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
+			    uint32_t);
+
+void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
+int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
+		     struct serv_parm *, uint32_t, int);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_more_plogi(struct lpfc_vport *);
+void lpfc_more_adisc(struct lpfc_vport *);
+void lpfc_end_rscn(struct lpfc_vport *);
+int lpfc_els_chk_latt(struct lpfc_vport *);
+int lpfc_els_abort_flogi(struct lpfc_hba *);
+int lpfc_initial_flogi(struct lpfc_vport *);
+void lpfc_issue_init_vfi(struct lpfc_vport *);
+int lpfc_initial_fdisc(struct lpfc_vport *);
+int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
+int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+		     struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+			struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+			   struct lpfc_nodelist *);
+int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
+			  struct lpfc_nodelist *);
+void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			  struct lpfc_iocbq *);
+int lpfc_els_handle_rscn(struct lpfc_vport *);
+void lpfc_els_flush_rscn(struct lpfc_vport *);
+int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
+void lpfc_els_flush_all_cmd(struct lpfc_hba *);
+void lpfc_els_flush_cmd(struct lpfc_vport *);
+int lpfc_els_disc_adisc(struct lpfc_vport *);
+int lpfc_els_disc_plogi(struct lpfc_vport *);
+void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout_handler(struct lpfc_vport *);
+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
+				      uint8_t, struct lpfc_nodelist *,
+				      uint32_t, uint32_t);
+void lpfc_hb_timeout_handler(struct lpfc_hba *);
+
+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			 struct lpfc_iocbq *);
+void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+				    struct lpfc_iocbq *);
+int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_fdmi_tmo(unsigned long);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
+
+int lpfc_config_port_prep(struct lpfc_hba *);
+void lpfc_update_vport_wwn(struct lpfc_vport *vport);
+int lpfc_config_port_post(struct lpfc_hba *);
+int lpfc_hba_down_prep(struct lpfc_hba *);
+int lpfc_hba_down_post(struct lpfc_hba *);
+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
+int lpfc_online(struct lpfc_hba *);
+void lpfc_unblock_mgmt_io(struct lpfc_hba *);
+void lpfc_offline_prep(struct lpfc_hba *);
+void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
+
+int lpfc_sli_setup(struct lpfc_hba *);
+int lpfc_sli_queue_setup(struct lpfc_hba *);
+
+void lpfc_handle_eratt(struct lpfc_hba *);
+void lpfc_handle_latt(struct lpfc_hba *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
+
+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
+int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
+void lpfc_issue_init_vpi(struct lpfc_vport *);
+
+void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
+	uint32_t , LPFC_MBOXQ_t *);
+struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
+void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+			uint16_t);
+void lpfc_unregister_fcf(struct lpfc_hba *);
+void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
+int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
+void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
+
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
+void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
+void lpfc_stop_vport_timers(struct lpfc_vport *);
+
+void lpfc_poll_timeout(unsigned long ptr);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t);
+
+struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
+void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
+			   uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_selective_reset(struct lpfc_hba *);
+void lpfc_reset_barrier(struct lpfc_hba *);
+int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
+int lpfc_sli_brdkill(struct lpfc_hba *);
+int lpfc_sli_brdreset(struct lpfc_hba *);
+int lpfc_sli_brdrestart(struct lpfc_hba *);
+int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
+int lpfc_sli_host_down(struct lpfc_vport *);
+int lpfc_sli_hba_down(struct lpfc_hba *);
+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+				    struct lpfc_sli_ring *, uint32_t);
+void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+			struct lpfc_iocbq *, uint32_t);
+void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+			     struct lpfc_dmabuf *);
+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
+					     struct lpfc_sli_ring *,
+					     dma_addr_t);
+
+uint32_t lpfc_sli_get_buffer_tag(struct lpfc_hba *);
+struct lpfc_dmabuf * lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *,
+			struct lpfc_sli_ring *, uint32_t );
+
+int lpfc_sli_hbq_count(void);
+int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
+void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
+int lpfc_sli_hbq_size(void);
+int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
+			       struct lpfc_iocbq *);
+int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
+int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
+			uint64_t, lpfc_ctx_cmd);
+
+void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
+struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
+					 struct lpfc_name *);
+
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
+			     struct lpfc_iocbq *, struct lpfc_iocbq *,
+			     uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+			     struct lpfc_iocbq *);
+
+void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+
+void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
+/* Function prototypes. */
+const char* lpfc_info(struct Scsi_Host *);
+int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
+
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
+void lpfc_get_cfgparam(struct lpfc_hba *);
+void lpfc_get_vport_cfgparam(struct lpfc_vport *);
+int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
+void lpfc_free_sysfs_attr(struct lpfc_vport *);
+extern struct device_attribute *lpfc_hba_attrs[];
+extern struct device_attribute *lpfc_vport_attrs[];
+extern struct scsi_host_template lpfc_template;
+extern struct scsi_host_template lpfc_vport_template;
+extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
+extern int lpfc_sli_mode;
+extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
+
+int  lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int  lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *,	size_t);
+void lpfc_terminate_rport_io(struct fc_rport *);
+void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
+
+struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
+int  lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+void destroy_port(struct lpfc_vport *);
+int lpfc_get_instance(void);
+void lpfc_host_attrib_init(struct Scsi_Host *);
+
+extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+	uint32_t, uint32_t);
+extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
+	uint32_t, uint32_t);
+extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+
+/* SLI4 if_type 2 externs. */
+int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
+				   uint16_t *, uint16_t *);
+int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
+					  uint16_t *, uint16_t *);
+
+/* externs BlockGuard */
+extern char *_dump_buf_data;
+extern unsigned long _dump_buf_data_order;
+extern char *_dump_buf_dif;
+extern unsigned long _dump_buf_dif_order;
+extern spinlock_t _dump_buf_lock;
+extern int _dump_buf_done;
+extern spinlock_t pgcnt_lock;
+extern unsigned int pgcnt;
+extern unsigned int lpfc_prot_mask;
+extern unsigned char lpfc_prot_guard;
+
+/* Interface exported by fabric iocb scheduler */
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+void lpfc_fabric_abort_hba(struct lpfc_hba *);
+void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+void lpfc_rampdown_queue_depth(struct lpfc_hba *);
+void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+				struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
+struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
+#define HBA_EVENT_RSCN                   5
+#define HBA_EVENT_LINK_UP                2
+#define HBA_EVENT_LINK_DOWN              3
+
+/* functions to support SGIOv4/bsg interface */
+int lpfc_bsg_request(struct fc_bsg_job *);
+int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			     struct lpfc_iocbq *);
+void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
+	struct lpfc_iocbq *);
+struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
+	struct lpfc_sli_ring *);
+int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
+	struct lpfc_iocbq *, uint32_t);
+uint32_t lpfc_drain_txq(struct lpfc_hba *);
+void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
+int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
+void lpfc_handle_rrq_active(struct lpfc_hba *);
+int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
+int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
+	uint16_t, uint16_t, uint16_t);
+uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
+void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
+struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
+	uint32_t);
+void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
+	enum mbox_type, enum dma_type, enum sta_type,
+	struct lpfc_dmabuf *, uint32_t);
+void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
+int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
+/* functions to support SR-IOV */
+int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
+uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
+int lpfc_sli4_queue_create(struct lpfc_hba *);
+void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+				struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);
+void lpfc_sli4_node_prep(struct lpfc_hba *phba);
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_ct.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_ct.c
new file mode 100644
index 0000000..93e96b3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_ct.c
@@ -0,0 +1,1910 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/*
+ * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
+ */
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* FDMI Port Speed definitions */
+#define HBA_PORTSPEED_1GBIT		0x0001	/* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT		0x0002	/* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT		0x0008	/* 4 GBit/sec */
+#define HBA_PORTSPEED_10GBIT		0x0004	/* 10 GBit/sec */
+#define HBA_PORTSPEED_8GBIT		0x0010	/* 8 GBit/sec */
+#define HBA_PORTSPEED_16GBIT		0x0020	/* 16 GBit/sec */
+#define HBA_PORTSPEED_UNKNOWN		0x0800	/* Unknown */
+
+#define FOURBYTES	4
+
+
+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+
+static void
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+			  struct lpfc_dmabuf *mp, uint32_t size)
+{
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+				"0146 Ignoring unsolicited CT No HBQ "
+				"status = x%x\n",
+				piocbq->iocb.ulpStatus);
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0145 Ignoring unsolicted CT HBQ Size:%d "
+			"status = x%x\n",
+			size, piocbq->iocb.ulpStatus);
+}
+
+static void
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		     struct lpfc_dmabuf *mp, uint32_t size)
+{
+	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
+}
+
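+/**
+ * lpfc_ct_unsol_event - Handle an unsolicited CT IOCB from the SLI ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to the driver SLI ring the event arrived on.
+ * @piocbq: Pointer to the unsolicited IOCB.
+ *
+ * The event is first offered to any registered bsg CT handler. Otherwise the
+ * attached buffers are consumed and freed; when HBQs are not enabled, the
+ * consumed buffers are re-posted to the ring.
+ **/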
+void
+lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *piocbq)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	IOCB_t *icmd = &piocbq->iocb;
+	int i;
+	struct lpfc_iocbq *iocbq;
+	dma_addr_t paddr;
+	uint32_t size;
+	struct list_head head;
+	struct lpfc_dmabuf *bdeBuf;
+
+	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+		return;
+
+	if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+	} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+		((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
+		/* Not enough posted buffers; Try posting more buffers */
+		phba->fc_stat.NoRcvBuf++;
+		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+			lpfc_post_buffer(phba, pring, 2);
+		return;
+	}
+
+	/* If there are no BDEs associated with this IOCB,
+	 * there is nothing to do.
+	 */
+	if (icmd->ulpBdeCount == 0)
+		return;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		INIT_LIST_HEAD(&head);
+		list_add_tail(&head, &piocbq->list);
+		list_for_each_entry(iocbq, &head, list) {
+			icmd = &iocbq->iocb;
+			if (icmd->ulpBdeCount == 0)
+				continue;
+			bdeBuf = iocbq->context2;
+			iocbq->context2 = NULL;
+			size  = icmd->un.cont64[0].tus.f.bdeSize;
+			lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+			lpfc_in_buf_free(phba, bdeBuf);
+			if (icmd->ulpBdeCount == 2) {
+				bdeBuf = iocbq->context3;
+				iocbq->context3 = NULL;
+				size  = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+				lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf,
+						     size);
+				lpfc_in_buf_free(phba, bdeBuf);
+			}
+		}
+		list_del(&head);
+	} else {
+		INIT_LIST_HEAD(&head);
+		list_add_tail(&head, &piocbq->list);
+		list_for_each_entry(iocbq, &head, list) {
+			icmd = &iocbq->iocb;
+			if (icmd->ulpBdeCount == 0)
+				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
+			for (i = 0; i < icmd->ulpBdeCount; i++) {
+				paddr = getPaddr(icmd->un.cont64[i].addrHigh,
+						 icmd->un.cont64[i].addrLow);
+				mp = lpfc_sli_ringpostbuf_get(phba, pring,
+							      paddr);
+				size = icmd->un.cont64[i].tus.f.bdeSize;
+				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
+				lpfc_in_buf_free(phba, mp);
+			}
+			lpfc_post_buffer(phba, pring, i);
+		}
+		list_del(&head);
+	}
+}
+
+/**
+ * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to the driver internal I/O ring.
+ * @piocbq: Pointer to the IOCBQ.
+ *
+ * This function serves as the default handler for the sli4 unsolicited
+ * abort event. It shall be invoked when there is no application interface
+ * registered unsolicited abort handler. This handler does nothing but
+ * just simply releases the dma buffer used by the unsol abort event.
+ **/
+void
+lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
+			       struct lpfc_sli_ring *pring,
+			       struct lpfc_iocbq *piocbq)
+{
+	IOCB_t *icmd = &piocbq->iocb;
+	struct lpfc_dmabuf *bdeBuf;
+	uint32_t size;
+
+	/* Forward abort event to any process registered to receive ct event */
+	if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+		return;
+
+	/* If there is no BDE associated with IOCB, there is nothing to do */
+	if (icmd->ulpBdeCount == 0)
+		return;
+	bdeBuf = piocbq->context2;
+	piocbq->context2 = NULL;
+	size  = icmd->un.cont64[0].tus.f.bdeSize;
+	lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
+	lpfc_in_buf_free(phba, bdeBuf);
+}
+
+static void
+lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+	struct lpfc_dmabuf *mlast, *next_mlast;
+
+	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
+		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+		list_del(&mlast->list);
+		kfree(mlast);
+	}
+	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+	kfree(mlist);
+	return;
+}
+
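+/**
+ * lpfc_alloc_ct_rsp - Allocate and map buffers for a CT response
+ * @phba: Pointer to HBA context object.
+ * @cmdcode: CT command code, used to choose the mbuf allocation priority.
+ * @bpl: First BPL entry to fill; one BDE is built per allocated buffer.
+ * @size: Total response size to cover.
+ * @entries: Set to the number of buffers (BDEs) that were added.
+ *
+ * Allocates lpfc_dmabuf chunks of up to FCELSSIZE bytes, links them on a list
+ * headed by the first buffer and describes each one in the BPL. Returns the
+ * list head, or NULL if any allocation fails (the partial list is freed).
+ **/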
+static struct lpfc_dmabuf *
+lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
+		  uint32_t size, int *entries)
+{
+	struct lpfc_dmabuf *mlist = NULL;
+	struct lpfc_dmabuf *mp;
+	int cnt, i = 0;
+
+	/* We get chunks of FCELSSIZE */
+	cnt = size > FCELSSIZE ? FCELSSIZE : size;
+
+	while (size) {
+		/* Allocate buffer for rsp payload */
+		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!mp) {
+			if (mlist)
+				lpfc_free_ct_rsp(phba, mlist);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&mp->list);
+
+		if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
+		    cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
+			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+		else
+			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+
+		if (!mp->virt) {
+			kfree(mp);
+			if (mlist)
+				lpfc_free_ct_rsp(phba, mlist);
+			return NULL;
+		}
+
+		/* Queue it to a linked list */
+		if (!mlist)
+			mlist = mp;
+		else
+			list_add_tail(&mp->list, &mlist->list);
+
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		/* build buffer ptr list for IOCB */
+		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+		bpl->tus.f.bdeSize = (uint16_t) cnt;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+
+		i++;
+		size -= cnt;
+	}
+
+	*entries = i;
+	return mlist;
+}
+
+int
+lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
+{
+	struct lpfc_dmabuf *buf_ptr;
+
+	if (ctiocb->context_un.ndlp) {
+		lpfc_nlp_put(ctiocb->context_un.ndlp);
+		ctiocb->context_un.ndlp = NULL;
+	}
+	if (ctiocb->context1) {
+		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		ctiocb->context1 = NULL;
+	}
+	if (ctiocb->context2) {
+		lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
+		ctiocb->context2 = NULL;
+	}
+
+	if (ctiocb->context3) {
+		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+		ctiocb->context1 = NULL;
+	}
+	lpfc_sli_release_iocbq(phba, ctiocb);
+	return 0;
+}
+
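+/**
+ * lpfc_gen_req - Build and issue a GEN_REQUEST64_CR iocb for a CT request
+ * @vport: Pointer to the virtual port issuing the request.
+ * @bmp: DMA buffer holding the BPL for the command and response buffers.
+ * @inp: DMA buffer(s) holding the CT command payload.
+ * @outp: DMA buffer(s) that will receive the CT response.
+ * @cmpl: Completion handler invoked when the iocb completes.
+ * @ndlp: Nodelist entry (RPI) the request is addressed to.
+ * @usr_flg: When set, context3 is left NULL so the caller retains @bmp.
+ * @num_entry: Number of BDEs described by the BPL.
+ * @tmo: Timeout for the request; 0 selects the default of 3 * fc_ratov.
+ * @retry: Retry count saved in the iocb.
+ *
+ * Returns 0 if the iocb was issued, 1 on failure.
+ **/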
+static int
+lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
+	     void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+		     struct lpfc_iocbq *),
+	     struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+	     uint32_t tmo, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *geniocb;
+	int rc;
+
+	/* Allocate buffer for  command iocb */
+	geniocb = lpfc_sli_get_iocbq(phba);
+
+	if (geniocb == NULL)
+		return 1;
+
+	icmd = &geniocb->iocb;
+	icmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
+
+	if (usr_flg)
+		geniocb->context3 = NULL;
+	else
+		geniocb->context3 = (uint8_t *) bmp;
+
+	/* Save for completion so we can release these resources */
+	geniocb->context1 = (uint8_t *) inp;
+	geniocb->context2 = (uint8_t *) outp;
+	geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
+
+	/* Fill in payload, bp points to frame payload */
+	icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+
+	/* Fill in rest of iocb */
+	icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	icmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+	icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+
+	if (!tmo) {
+		 /* FC spec states we need 3 * ratov for CT requests */
+		tmo = (3 * phba->fc_ratov);
+	}
+	icmd->ulpTimeout = tmo;
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	icmd->ulpContext = ndlp->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		/* For GEN_REQUEST64_CR, use the RPI */
+		icmd->ulpCt_h = 0;
+		icmd->ulpCt_l = 0;
+	}
+
+	/* Issue GEN REQ IOCB for NPORT <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0119 Issue GEN REQ IOCB to NPORT x%x "
+			 "Data: x%x x%x\n",
+			 ndlp->nlp_DID, icmd->ulpIoTag,
+			 vport->port_state);
+	geniocb->iocb_cmpl = cmpl;
+	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+	geniocb->vport = vport;
+	geniocb->retry = retry;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		lpfc_sli_release_iocbq(phba, geniocb);
+		return 1;
+	}
+
+	return 0;
+}
+
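+/**
+ * lpfc_ct_cmd - Allocate response buffers and issue a CT command
+ * @vport: Pointer to the virtual port issuing the command.
+ * @inmp: DMA buffer holding the CT request payload.
+ * @bmp: DMA buffer holding the BPL; entry 0 already describes the request.
+ * @ndlp: Nodelist entry the command is sent to.
+ * @cmpl: Completion handler for the gen request iocb.
+ * @rsp_size: Total size of the response buffers to allocate.
+ * @retry: Retry count passed through to lpfc_gen_req.
+ *
+ * Builds the response side of the BPL with lpfc_alloc_ct_rsp() and issues the
+ * request with lpfc_gen_req(). Returns 0 on success or -ENOMEM on failure.
+ **/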
+static int
+lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
+	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
+	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+			  struct lpfc_iocbq *),
+	    uint32_t rsp_size, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
+	struct lpfc_dmabuf *outmp;
+	int cnt = 0, status;
+	int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
+		CommandResponse.bits.CmdRsp;
+
+	bpl++;			/* Skip past ct request */
+
+	/* Put buffer(s) for ct rsp in bpl */
+	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
+	if (!outmp)
+		return -ENOMEM;
+	/*
+	 * Form the CT IOCB.  The total number of BDEs in this IOCB
+	 * is the single command plus response count from
+	 * lpfc_alloc_ct_rsp.
+	 */
+	cnt += 1;
+	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
+			      cnt, 0, retry);
+	if (status) {
+		lpfc_free_ct_rsp(phba, outmp);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+struct lpfc_vport *
+lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
+	struct lpfc_vport *vport_curr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
+		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport_curr;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
+static int
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli_ct_request *Response =
+		(struct lpfc_sli_ct_request *) mp->virt;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct lpfc_dmabuf *mlast, *next_mp;
+	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
+	uint32_t Did, CTentry;
+	int Cnt;
+	struct list_head head;
+
+	lpfc_set_disctmo(vport);
+	vport->num_disc_nodes = 0;
+	vport->fc_ns_retry = 0;
+
+
+	list_add_tail(&head, &mp->list);
+	list_for_each_entry_safe(mp, next_mp, &head, list) {
+		mlast = mp;
+
+		Cnt = Size  > FCELSSIZE ? FCELSSIZE : Size;
+
+		Size -= Cnt;
+
+		if (!ctptr) {
+			ctptr = (uint32_t *) mlast->virt;
+		} else
+			Cnt -= 16;	/* subtract length of CT header */
+
+		/* Loop through entire NameServer list of DIDs */
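+		/* Each entry is one 32-bit word: the low 24 bits hold the
+		 * port ID and the top control byte carries the last-entry
+		 * flag tested via SLI_CT_LAST_ENTRY below.
+		 */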
+		while (Cnt >= sizeof (uint32_t)) {
+			/* Get next DID from NameServer List */
+			CTentry = *ctptr++;
+			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
+
+			ndlp = NULL;
+
+			/*
+			 * Check whether this DID needs RSCN processing.
+			 * To conserve RPIs, filter out addresses for other
+			 * vports on the same physical HBA.
+			 */
+			if ((Did != vport->fc_myDID) &&
+			    ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
+			     vport->cfg_peer_port_login)) {
+				if ((vport->port_type != LPFC_NPIV_PORT) ||
+				    (!(vport->ct_flags & FC_CT_RFF_ID)) ||
+				    (!vport->cfg_restrict_login)) {
+					ndlp = lpfc_setup_disc_node(vport, Did);
+					if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+						lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_CT,
+						"Parse GID_FTrsp: "
+						"did:x%x flg:x%x x%x",
+						Did, ndlp->nlp_flag,
+						vport->fc_flag);
+
+						lpfc_printf_vlog(vport,
+							KERN_INFO,
+							LOG_DISCOVERY,
+							"0238 Process "
+							"x%x NameServer Rsp "
+							"Data: x%x x%x x%x\n",
+							Did, ndlp->nlp_flag,
+							vport->fc_flag,
+							vport->fc_rscn_id_cnt);
+					} else {
+						lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_CT,
+						"Skip1 GID_FTrsp: "
+						"did:x%x flg:x%x cnt:%d",
+						Did, vport->fc_flag,
+						vport->fc_rscn_id_cnt);
+
+						lpfc_printf_vlog(vport,
+							KERN_INFO,
+							LOG_DISCOVERY,
+							"0239 Skip x%x "
+							"NameServer Rsp Data: "
+							"x%x x%x\n",
+							Did, vport->fc_flag,
+							vport->fc_rscn_id_cnt);
+					}
+
+				} else {
+					if (!(vport->fc_flag & FC_RSCN_MODE) ||
+					(lpfc_rscn_payload_check(vport, Did))) {
+						lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_CT,
+						"Query GID_FTrsp: "
+						"did:x%x flg:x%x cnt:%d",
+						Did, vport->fc_flag,
+						vport->fc_rscn_id_cnt);
+
+						/* This NPortID was previously
+						 * an FCP target; don't even
+						 * bother to send GFF_ID.
+						 */
+						ndlp = lpfc_findnode_did(vport,
+							Did);
+						if (ndlp &&
+						    NLP_CHK_NODE_ACT(ndlp)
+						    && (ndlp->nlp_type &
+						     NLP_FCP_TARGET))
+							lpfc_setup_disc_node
+								(vport, Did);
+						else if (lpfc_ns_cmd(vport,
+							SLI_CTNS_GFF_ID,
+							0, Did) == 0)
+							vport->num_disc_nodes++;
+						else
+							lpfc_setup_disc_node
+								(vport, Did);
+					}
+					else {
+						lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_CT,
+						"Skip2 GID_FTrsp: "
+						"did:x%x flg:x%x cnt:%d",
+						Did, vport->fc_flag,
+						vport->fc_rscn_id_cnt);
+
+						lpfc_printf_vlog(vport,
+							KERN_INFO,
+							LOG_DISCOVERY,
+							"0245 Skip x%x "
+							"NameServer Rsp Data: "
+							"x%x x%x\n",
+							Did, vport->fc_flag,
+							vport->fc_rscn_id_cnt);
+					}
+				}
+			}
+			if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
+				goto nsout1;
+			Cnt -= sizeof (uint32_t);
+		}
+		ctptr = NULL;
+
+	}
+
+nsout1:
+	list_del(&head);
+	return 0;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *outp;
+	struct lpfc_sli_ct_request *CTrsp;
+	struct lpfc_nodelist *ndlp;
+	int rc;
+
+	/* First save ndlp, before we overwrite it */
+	ndlp = cmdiocb->context_un.ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
+	irsp = &rspiocb->iocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		 "GID_FT cmpl:     status:x%x/x%x rtry:%d",
+		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+
+	/* Don't bother processing response if vport is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+
+	if (lpfc_els_chk_latt(vport)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0216 Link event during NS query\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto out;
+	}
+	if (lpfc_error_lost_link(irsp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0226 NS query failed due to link event\n");
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		goto out;
+	}
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+			if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
+			    irsp->un.ulpWord[4] != IOERR_NO_RESOURCES)
+				vport->fc_ns_retry++;
+
+			/* CT command is being retried */
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+					 vport->fc_ns_retry, 0);
+			if (rc == 0)
+				goto out;
+		}
+		if (vport->fc_flag & FC_RSCN_MODE)
+			lpfc_els_flush_rscn(vport);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0257 GID_FT Query error: 0x%x 0x%x\n",
+				 irsp->ulpStatus, vport->fc_ns_retry);
+	} else {
+		/* Good status, continue checking */
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0208 NameServer Rsp Data: x%x\n",
+					 vport->fc_flag);
+			lpfc_ns_rsp(vport, outp,
+				    (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+		} else if (CTrsp->CommandResponse.bits.CmdRsp ==
+			   be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+			/* NameServer Rsp Error */
+			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
+			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0269 No NameServer Entries "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT no entry  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+			} else {
+				lpfc_printf_vlog(vport, KERN_INFO,
+					LOG_DISCOVERY,
+					"0240 NameServer Rsp Error "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT rsp err1  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+			}
+
+
+		} else {
+			/* NameServer Rsp Error */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					"0241 NameServer Rsp Error "
+					"Data: x%x x%x x%x x%x\n",
+					CTrsp->CommandResponse.bits.CmdRsp,
+					(uint32_t) CTrsp->ReasonCode,
+					(uint32_t) CTrsp->Explanation,
+					vport->fc_flag);
+
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+				"GID_FT rsp err2  cmd:x%x rsn:x%x exp:x%x",
+				(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+				(uint32_t) CTrsp->ReasonCode,
+				(uint32_t) CTrsp->Explanation);
+		}
+	}
+	/* Link up / RSCN discovery */
+	if (vport->num_disc_nodes == 0) {
+		/*
+		 * The driver has cycled through all Nports in the RSCN payload.
+		 * Complete the handling by cleaning up and marking the
+		 * current driver state.
+		 */
+		if (vport->port_state >= LPFC_DISC_AUTH) {
+			if (vport->fc_flag & FC_RSCN_MODE) {
+				lpfc_els_flush_rscn(vport);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+				spin_unlock_irq(shost->host_lock);
+			}
+			else
+				lpfc_els_flush_rscn(vport);
+		}
+
+		lpfc_disc_start(vport);
+	}
+out:
+	cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+	struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	struct lpfc_sli_ct_request *CTrsp;
+	int did, rc, retry;
+	uint8_t fbits;
+	struct lpfc_nodelist *ndlp;
+
+	did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
+	did = be32_to_cpu(did);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"GFF_ID cmpl:     status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		/* Good status, continue checking */
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+			if ((fbits & FC4_FEATURE_INIT) &&
+			    !(fbits & FC4_FEATURE_TARGET)) {
+				lpfc_printf_vlog(vport, KERN_INFO,
+						 LOG_DISCOVERY,
+						 "0270 Skip x%x GFF "
+						 "NameServer Rsp Data: (init) "
+						 "x%x x%x\n", did, fbits,
+						 vport->fc_rscn_id_cnt);
+				goto out;
+			}
+		}
+	}
+	else {
+		/* Check for retry */
+		if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
+			retry = 1;
+			if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (irsp->un.ulpWord[4]) {
+				case IOERR_NO_RESOURCES:
+					/* We don't increment the retry
+					 * count for this case.
+					 */
+					break;
+				case IOERR_LINK_DOWN:
+				case IOERR_SLI_ABORTED:
+				case IOERR_SLI_DOWN:
+					retry = 0;
+					break;
+				default:
+					cmdiocb->retry++;
+				}
+			}
+			else
+				cmdiocb->retry++;
+
+			if (retry) {
+				/* CT command is being retried */
+				rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+					 cmdiocb->retry, did);
+				if (rc == 0) {
+					/* success */
+					lpfc_ct_free_iocb(phba, cmdiocb);
+					return;
+				}
+			}
+		}
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0267 NameServer GFF Rsp "
+				 "x%x Error (%d %d) Data: x%x x%x\n",
+				 did, irsp->ulpStatus, irsp->un.ulpWord[4],
+				 vport->fc_flag, vport->fc_rscn_id_cnt);
+	}
+
+	/* This is a target port, unregistered port, or the GFF_ID failed */
+	ndlp = lpfc_setup_disc_node(vport, did);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0242 Process x%x GFF "
+				 "NameServer Rsp Data: x%x x%x x%x\n",
+				 did, ndlp->nlp_flag, vport->fc_flag,
+				 vport->fc_rscn_id_cnt);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0243 Skip x%x GFF "
+				 "NameServer Rsp Data: x%x x%x\n", did,
+				 vport->fc_flag, vport->fc_rscn_id_cnt);
+	}
+out:
+	/* Link up / RSCN discovery */
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+	if (vport->num_disc_nodes == 0) {
+		/*
+		 * The driver has cycled through all Nports in the RSCN payload.
+		 * Complete the handling by cleaning up and marking the
+		 * current driver state.
+		 */
+		if (vport->port_state >= LPFC_DISC_AUTH) {
+			if (vport->fc_flag & FC_RSCN_MODE) {
+				lpfc_els_flush_rscn(vport);
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+				spin_unlock_irq(shost->host_lock);
+			}
+			else
+				lpfc_els_flush_rscn(vport);
+		}
+		lpfc_disc_start(vport);
+	}
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+
+static void
+lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	     struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_dmabuf *inp;
+	struct lpfc_dmabuf *outp;
+	IOCB_t *irsp;
+	struct lpfc_sli_ct_request *CTrsp;
+	struct lpfc_nodelist *ndlp;
+	int cmdcode, rc;
+	uint8_t retry;
+	uint32_t latt;
+
+	/* First save ndlp, before we overwrite it */
+	ndlp = cmdiocb->context_un.ndlp;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+	irsp = &rspiocb->iocb;
+
+	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
+					CommandResponse.bits.CmdRsp);
+	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+
+	latt = lpfc_els_chk_latt(vport);
+
+	/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0209 CT Request completes, latt %d, "
+			 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+			 latt, irsp->ulpStatus,
+			 CTrsp->CommandResponse.bits.CmdRsp,
+			 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"CT cmd cmpl:     status:x%x/x%x cmd:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+
+	if (irsp->ulpStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0268 NS cmd %x Error (%d %d)\n",
+				 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
+			 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+			goto out;
+
+		retry = cmdiocb->retry;
+		if (retry >= LPFC_MAX_NS_RETRY)
+			goto out;
+
+		retry++;
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0250 Retrying NS cmd %x\n", cmdcode);
+		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
+		if (rc == 0)
+			goto out;
+	}
+
+out:
+	cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFT_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RNN_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			 struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSPN_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			 struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RSNN_NN;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	/* even if it fails we will act as though it succeeded. */
+	vport->ct_flags = 0;
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_vport *vport = cmdiocb->vport;
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		struct lpfc_dmabuf *outp;
+		struct lpfc_sli_ct_request *CTrsp;
+
+		outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+		if (CTrsp->CommandResponse.bits.CmdRsp ==
+		    be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
+			vport->ct_flags |= FC_CT_RFF_ID;
+	}
+	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
+	return;
+}
+
+int
+lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
+	size_t size)
+{
+	int n;
+	uint8_t *wwn = vport->phba->wwpn;
+
+	n = snprintf(symbol, size,
+		     "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+		     wwn[0], wwn[1], wwn[2], wwn[3],
+		     wwn[4], wwn[5], wwn[6], wwn[7]);
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		return n;
+
+	if (n < size)
+		n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
+
+	if (n < size &&
+	    strlen(vport->fc_vport->symbolic_name))
+		n += snprintf(symbol + n, size - n, " VName-%s",
+			      vport->fc_vport->symbolic_name);
+	return n;
+}
+
+int
+lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+	size_t size)
+{
+	char fwrev[FW_REV_STR_SIZE];
+	int n;
+
+	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+
+	n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
+		vport->phba->ModelName, fwrev, lpfc_release_version);
+	return n;
+}
+
+/*
+ * lpfc_ns_cmd
+ * Description:
+ *    Issue Cmd to NameServer
+ *       SLI_CTNS_GID_FT
+ *       SLI_CTNS_RFT_ID
+ */
+int
+lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
+	    uint8_t retry, uint32_t context)
+{
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *mp, *bmp;
+	struct lpfc_sli_ct_request *CtReq;
+	struct ulp_bde64 *bpl;
+	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+		      struct lpfc_iocbq *) = NULL;
+	uint32_t rsp_size = 1024;
+	size_t   size;
+	int rc = 0;
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
+	    || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+		rc=1;
+		goto ns_cmd_exit;
+	}
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		rc=2;
+		goto ns_cmd_exit;
+	}
+
+	INIT_LIST_HEAD(&mp->list);
+	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+	if (!mp->virt) {
+		rc=3;
+		goto ns_cmd_free_mp;
+	}
+
+	/* Allocate buffer for Buffer ptr list */
+	bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc=4;
+		goto ns_cmd_free_mpvirt;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
+	if (!bmp->virt) {
+		rc=5;
+		goto ns_cmd_free_bmp;
+	}
+
+	/* NameServer Req */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0236 NameServer Req Data: x%x x%x x%x\n",
+			 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	memset(bpl, 0, sizeof(struct ulp_bde64));
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+	bpl->tus.f.bdeFlags = 0;
+	if (cmdcode == SLI_CTNS_GID_FT)
+		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_GFF_ID)
+		bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RFT_ID)
+		bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RNN_ID)
+		bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RSPN_ID)
+		bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RSNN_NN)
+		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_DA_ID)
+		bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
+	else if (cmdcode == SLI_CTNS_RFF_ID)
+		bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
+	else
+		bpl->tus.f.bdeSize = 0;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+	memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
+	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	CtReq->RevisionId.bits.InId = 0;
+	CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
+	CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
+	CtReq->CommandResponse.bits.Size = 0;
+	switch (cmdcode) {
+	case SLI_CTNS_GID_FT:
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_GID_FT);
+		CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+		if (vport->port_state < LPFC_NS_QRY)
+			vport->port_state = LPFC_NS_QRY;
+		lpfc_set_disctmo(vport);
+		cmpl = lpfc_cmpl_ct_cmd_gid_ft;
+		rsp_size = FC_MAX_NS_RSP;
+		break;
+
+	case SLI_CTNS_GFF_ID:
+		CtReq->CommandResponse.bits.CmdRsp =
+			be16_to_cpu(SLI_CTNS_GFF_ID);
+		CtReq->un.gff.PortId = cpu_to_be32(context);
+		cmpl = lpfc_cmpl_ct_cmd_gff_id;
+		break;
+
+	case SLI_CTNS_RFT_ID:
+		vport->ct_flags &= ~FC_CT_RFT_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_RFT_ID);
+		CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+		CtReq->un.rft.fcpReg = 1;
+		cmpl = lpfc_cmpl_ct_cmd_rft_id;
+		break;
+
+	case SLI_CTNS_RNN_ID:
+		vport->ct_flags &= ~FC_CT_RNN_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_RNN_ID);
+		CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
+		memcpy(CtReq->un.rnn.wwnn,  &vport->fc_nodename,
+		       sizeof (struct lpfc_name));
+		cmpl = lpfc_cmpl_ct_cmd_rnn_id;
+		break;
+
+	case SLI_CTNS_RSPN_ID:
+		vport->ct_flags &= ~FC_CT_RSPN_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_RSPN_ID);
+		CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
+		size = sizeof(CtReq->un.rspn.symbname);
+		CtReq->un.rspn.len =
+			lpfc_vport_symbolic_port_name(vport,
+			CtReq->un.rspn.symbname, size);
+		cmpl = lpfc_cmpl_ct_cmd_rspn_id;
+		break;
+	case SLI_CTNS_RSNN_NN:
+		vport->ct_flags &= ~FC_CT_RSNN_NN;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_RSNN_NN);
+		memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
+		       sizeof (struct lpfc_name));
+		size = sizeof(CtReq->un.rsnn.symbname);
+		CtReq->un.rsnn.len =
+			lpfc_vport_symbolic_node_name(vport,
+			CtReq->un.rsnn.symbname, size);
+		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
+		break;
+	case SLI_CTNS_DA_ID:
+		/* Implement DA_ID Nameserver request */
+		CtReq->CommandResponse.bits.CmdRsp =
+			be16_to_cpu(SLI_CTNS_DA_ID);
+		CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
+		cmpl = lpfc_cmpl_ct_cmd_da_id;
+		break;
+	case SLI_CTNS_RFF_ID:
+		vport->ct_flags &= ~FC_CT_RFF_ID;
+		CtReq->CommandResponse.bits.CmdRsp =
+		    be16_to_cpu(SLI_CTNS_RFF_ID);
+		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
+		CtReq->un.rff.fbits = FC4_FEATURE_INIT;
+		CtReq->un.rff.type_code = FC_TYPE_FCP;
+		cmpl = lpfc_cmpl_ct_cmd_rff_id;
+		break;
+	}
+	/* The lpfc_ct_cmd/lpfc_gen_req calls increment the ndlp reference count
+	 * to hold ndlp reference for the corresponding callback function.
+	 */
+	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
+		/* On success, The cmpl function will free the buffers */
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+			"Issue CT cmd:    cmd:x%x did:x%x",
+			cmdcode, ndlp->nlp_DID, 0);
+		return 0;
+	}
+	rc=6;
+
+	/* Decrement ndlp reference count to release ndlp reference held
+	 * for the failed command's callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ns_cmd_free_bmp:
+	kfree(bmp);
+ns_cmd_free_mpvirt:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ns_cmd_free_mp:
+	kfree(mp);
+ns_cmd_exit:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
+	return 1;
+}
+
+static void
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		      struct lpfc_iocbq * rspiocb)
+{
+	struct lpfc_dmabuf *inp = cmdiocb->context1;
+	struct lpfc_dmabuf *outp = cmdiocb->context2;
+	struct lpfc_sli_ct_request *CTrsp = outp->virt;
+	struct lpfc_sli_ct_request *CTcmd = inp->virt;
+	struct lpfc_nodelist *ndlp;
+	uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
+	uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp = &rspiocb->iocb;
+	uint32_t latt;
+
+	latt = lpfc_els_chk_latt(vport);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+		"FDMI cmpl:       status:x%x/x%x latt:%d",
+		irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+
+	if (latt || irsp->ulpStatus) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0229 FDMI cmd %04x failed, latt = %d "
+				 "ulpStatus: x%x, rid x%x\n",
+				 be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		lpfc_ct_free_iocb(phba, cmdiocb);
+		return;
+	}
+
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		goto fail_out;
+
+	if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+		/* FDMI rsp failed */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0220 FDMI rsp failed Data: x%x\n",
+				 be16_to_cpu(fdmi_cmd));
+	}
+
+	switch (be16_to_cpu(fdmi_cmd)) {
+	case SLI_MGMT_RHBA:
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
+		break;
+
+	case SLI_MGMT_RPA:
+		break;
+
+	case SLI_MGMT_DHBA:
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
+		break;
+
+	case SLI_MGMT_DPRT:
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
+		break;
+	}
+
+fail_out:
+	lpfc_ct_free_iocb(phba, cmdiocb);
+	return;
+}
+
+int
+lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *mp, *bmp;
+	struct lpfc_sli_ct_request *CtReq;
+	struct ulp_bde64 *bpl;
+	uint32_t size;
+	REG_HBA *rh;
+	PORT_ENTRY *pe;
+	REG_PORT_ATTRIBUTE *pab;
+	ATTRIBUTE_BLOCK *ab;
+	ATTRIBUTE_ENTRY *ae;
+	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+		      struct lpfc_iocbq *);
+
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp)
+		goto fdmi_cmd_exit;
+
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
+	if (!mp->virt)
+		goto fdmi_cmd_free_mp;
+
+	/* Allocate buffer for Buffer ptr list */
+	bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp)
+		goto fdmi_cmd_free_mpvirt;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
+	if (!bmp->virt)
+		goto fdmi_cmd_free_bmp;
+
+	INIT_LIST_HEAD(&mp->list);
+	INIT_LIST_HEAD(&bmp->list);
+
+	/* FDMI request */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0218 FDMI Request Data: x%x x%x x%x\n",
+			 vport->fc_flag, vport->port_state, cmdcode);
+	CtReq = (struct lpfc_sli_ct_request *) mp->virt;
+
+	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
+	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
+	CtReq->RevisionId.bits.InId = 0;
+
+	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
+	size = 0;
+
+	switch (cmdcode) {
+	case SLI_MGMT_RHBA:
+		{
+			lpfc_vpd_t *vp = &phba->vpd;
+			uint32_t i, j, incr;
+			int len;
+
+			CtReq->CommandResponse.bits.CmdRsp =
+			    be16_to_cpu(SLI_MGMT_RHBA);
+			CtReq->CommandResponse.bits.Size = 0;
+			rh = (REG_HBA *) & CtReq->un.PortID;
+			memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
+			       sizeof (struct lpfc_name));
+			/* One entry (port) per adapter */
+			rh->rpl.EntryCnt = be32_to_cpu(1);
+			memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
+			       sizeof (struct lpfc_name));
+
+			/* point to the HBA attribute block */
+			size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
+			ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
+			ab->EntryCnt = 0;
+
+			/* Point to the beginning of the first HBA attribute
+			   entry */
+			/* #1 HBA attribute entry */
+			size += FOURBYTES;
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
+			ae->ad.bits.AttrLen =  be16_to_cpu(FOURBYTES
+						+ sizeof (struct lpfc_name));
+			memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
+			       sizeof (struct lpfc_name));
+			ab->EntryCnt++;
+			size += FOURBYTES + sizeof (struct lpfc_name);
+
+			/* #2 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
+			strcpy(ae->un.Manufacturer, "Emulex Corporation");
+			len = strlen(ae->un.Manufacturer);
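+			/* Round the string length up to the next 4-byte
+			 * boundary (a full word is added when already
+			 * aligned) so each attribute entry stays word aligned.
+			 */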
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #3 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
+			strcpy(ae->un.SerialNumber, phba->SerialNumber);
+			len = strlen(ae->un.SerialNumber);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #4 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(MODEL);
+			strcpy(ae->un.Model, phba->ModelName);
+			len = strlen(ae->un.Model);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #5 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
+			strcpy(ae->un.ModelDescription, phba->ModelDesc);
+			len = strlen(ae->un.ModelDescription);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #6 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
+			/* Convert JEDEC ID to ascii for hardware version */
+			incr = vp->rev.biuRev;
+			for (i = 0; i < 8; i++) {
+				j = (incr & 0xf);
+				if (j <= 9)
+					ae->un.HardwareVersion[7 - i] =
+					    (char)((uint8_t) 0x30 +
+						   (uint8_t) j);
+				else
+					ae->un.HardwareVersion[7 - i] =
+					    (char)((uint8_t) 0x61 +
+						   (uint8_t) (j - 10));
+				incr = (incr >> 4);
+			}
+			ab->EntryCnt++;
+			size += FOURBYTES + 8;
+
+			/* #7 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
+			strcpy(ae->un.DriverVersion, lpfc_release_version);
+			len = strlen(ae->un.DriverVersion);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #8 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
+			strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
+			len = strlen(ae->un.OptionROMVersion);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #9 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
+			lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
+				1);
+			len = strlen(ae->un.FirmwareVersion);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #10 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
+			sprintf(ae->un.OsNameVersion, "%s %s %s",
+				init_utsname()->sysname,
+				init_utsname()->release,
+				init_utsname()->version);
+			len = strlen(ae->un.OsNameVersion);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			ab->EntryCnt++;
+			size += FOURBYTES + len;
+
+			/* #11 HBA attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
+			ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+			ae->un.MaxCTPayloadLen = (65 * 4096);
+			ab->EntryCnt++;
+			size += FOURBYTES + 4;
+
+			ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
+			/* Total size */
+			size = GID_REQUEST_SZ - 4 + size;
+		}
+		break;
+
+	case SLI_MGMT_RPA:
+		{
+			lpfc_vpd_t *vp;
+			struct serv_parm *hsp;
+			int len;
+
+			vp = &phba->vpd;
+
+			CtReq->CommandResponse.bits.CmdRsp =
+			    be16_to_cpu(SLI_MGMT_RPA);
+			CtReq->CommandResponse.bits.Size = 0;
+			pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
+			size = sizeof (struct lpfc_name) + FOURBYTES;
+			memcpy((uint8_t *) & pab->PortName,
+			       (uint8_t *) & vport->fc_sparam.portName,
+			       sizeof (struct lpfc_name));
+			pab->ab.EntryCnt = 0;
+
+			/* #1 Port attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+			ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
+			ae->un.SupportFC4Types[2] = 1;
+			ae->un.SupportFC4Types[7] = 1;
+			pab->ab.EntryCnt++;
+			size += FOURBYTES + 32;
+
+			/* #2 Port attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+			ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+
+			ae->un.SupportSpeed = 0;
+			if (phba->lmt & LMT_16Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
+			if (phba->lmt & LMT_10Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT;
+			if (phba->lmt & LMT_8Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT;
+			if (phba->lmt & LMT_4Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_4GBIT;
+			if (phba->lmt & LMT_2Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_2GBIT;
+			if (phba->lmt & LMT_1Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_1GBIT;
+
+			pab->ab.EntryCnt++;
+			size += FOURBYTES + 4;
+
+			/* #3 Port attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+			ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+			switch(phba->fc_linkspeed) {
+			case LPFC_LINK_SPEED_1GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+				break;
+			case LPFC_LINK_SPEED_2GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+				break;
+			case LPFC_LINK_SPEED_4GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+				break;
+			case LPFC_LINK_SPEED_8GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
+				break;
+			case LPFC_LINK_SPEED_10GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+				break;
+			case LPFC_LINK_SPEED_16GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
+				break;
+			default:
+				ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
+				break;
+			}
+			pab->ab.EntryCnt++;
+			size += FOURBYTES + 4;
+
+			/* #4 Port attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+			ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
+			hsp = (struct serv_parm *) & vport->fc_sparam;
+			ae->un.MaxFrameSize =
+			    (((uint32_t) hsp->cmn.
+			      bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
+			    bbRcvSizeLsb;
+			pab->ab.EntryCnt++;
+			size += FOURBYTES + 4;
+
+			/* #5 Port attribute entry */
+			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
+			ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
+			strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
+			len = strlen((char *)ae->un.OsDeviceName);
+			len += (len & 3) ? (4 - (len & 3)) : 4;
+			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
+			pab->ab.EntryCnt++;
+			size += FOURBYTES + len;
+
+			if (vport->cfg_fdmi_on == 2) {
+				/* #6 Port attribute entry */
+				ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
+							  size);
+				ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
+				sprintf(ae->un.HostName, "%s",
+					init_utsname()->nodename);
+				len = strlen(ae->un.HostName);
+				len += (len & 3) ? (4 - (len & 3)) : 4;
+				ae->ad.bits.AttrLen =
+				    be16_to_cpu(FOURBYTES + len);
+				pab->ab.EntryCnt++;
+				size += FOURBYTES + len;
+			}
+
+			pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
+			/* Total size */
+			size = GID_REQUEST_SZ - 4 + size;
+		}
+		break;
+
+	case SLI_MGMT_DHBA:
+		CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
+		CtReq->CommandResponse.bits.Size = 0;
+		pe = (PORT_ENTRY *) & CtReq->un.PortID;
+		memcpy((uint8_t *) & pe->PortName,
+		       (uint8_t *) & vport->fc_sparam.portName,
+		       sizeof (struct lpfc_name));
+		size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+		break;
+
+	case SLI_MGMT_DPRT:
+		CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
+		CtReq->CommandResponse.bits.Size = 0;
+		pe = (PORT_ENTRY *) & CtReq->un.PortID;
+		memcpy((uint8_t *) & pe->PortName,
+		       (uint8_t *) & vport->fc_sparam.portName,
+		       sizeof (struct lpfc_name));
+		size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
+		break;
+	}
+
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.f.bdeSize = size;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	cmpl = lpfc_cmpl_ct_cmd_fdmi;
+
+	/* The lpfc_ct_cmd/lpfc_gen_req calls increment the ndlp reference count
+	 * to hold ndlp reference for the corresponding callback function.
+	 */
+	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
+		return 0;
+
+	/* Decrement ndlp reference count to release ndlp reference held
+	 * for the failed command's callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+fdmi_cmd_free_bmp:
+	kfree(bmp);
+fdmi_cmd_free_mpvirt:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+fdmi_cmd_free_mp:
+	kfree(mp);
+fdmi_cmd_exit:
+	/* Issue FDMI request failed */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0244 Issue FDMI request failed Data: x%x\n",
+			 cmdcode);
+	return 1;
+}
+
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: context object of the timer (the lpfc_vport pointer).
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ *      handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts N_Port discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
+void
+lpfc_fdmi_tmo(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_FDMI_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+void
+lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		if (init_utsname()->nodename[0] != '\0')
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+		else
+			mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+	}
+	return;
+}
+
+void
+lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	lpfc_vpd_t *vp = &phba->vpd;
+	uint32_t b1, b2, b3, b4, i, rev;
+	char c;
+	uint32_t *ptr, str[4];
+	uint8_t *fwname;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
+	else if (vp->rev.rBit) {
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
+			rev = vp->rev.sli2FwRev;
+		else
+			rev = vp->rev.sli1FwRev;
+
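+		/* Revision word layout: bits 15:12 and 11:8 are the major
+		 * and minor numbers, bits 7:6 a fix level, bits 5:4 select
+		 * the release-type character and bits 3:0 the release number.
+		 */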
+		b1 = (rev & 0x0000f000) >> 12;
+		b2 = (rev & 0x00000f00) >> 8;
+		b3 = (rev & 0x000000c0) >> 6;
+		b4 = (rev & 0x00000030) >> 4;
+
+		switch (b4) {
+		case 0:
+			c = 'N';
+			break;
+		case 1:
+			c = 'A';
+			break;
+		case 2:
+			c = 'B';
+			break;
+		case 3:
+			c = 'X';
+			break;
+		default:
+			c = 0;
+			break;
+		}
+		b4 = (rev & 0x0000000f);
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE)
+			fwname = vp->rev.sli2FwName;
+		else
+			fwname = vp->rev.sli1FwName;
+
+		for (i = 0; i < 16; i++)
+			if (fwname[i] == 0x20)
+				fwname[i] = 0;
+
+		ptr = (uint32_t*)fwname;
+
+		for (i = 0; i < 3; i++)
+			str[i] = be32_to_cpu(*ptr++);
+
+		if (c == 0) {
+			if (flag)
+				sprintf(fwrevision, "%d.%d%d (%s)",
+					b1, b2, b3, (char *)str);
+			else
+				sprintf(fwrevision, "%d.%d%d", b1,
+					b2, b3);
+		} else {
+			if (flag)
+				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
+					b1, b2, b3, c,
+					b4, (char *)str);
+			else
+				sprintf(fwrevision, "%d.%d%d%c%d",
+					b1, b2, b3, c, b4);
+		}
+	} else {
+		rev = vp->rev.smFwRev;
+
+		b1 = (rev & 0xff000000) >> 24;
+		b2 = (rev & 0x00f00000) >> 20;
+		b3 = (rev & 0x000f0000) >> 16;
+		c  = (rev & 0x0000ff00) >> 8;
+		b4 = (rev & 0x000000ff);
+
+		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
+	}
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 0000000..af04b0d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,4468 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2007-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_bsg.h"
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/*
+ * debugfs interface
+ *
+ * To access this interface the user should:
+ * # mount -t debugfs none /sys/kernel/debug
+ *
+ * The lpfc debugfs directory hierarchy is:
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
+ * where Y is the vport VPI on that hba
+ *
+ * Debugging services available per vport:
+ * discovery_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
+ * See lpfc_debugfs.h for the different categories of discovery events.
+ * To enable the discovery trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_disc_trc=X   Where X is the event trace depth for
+ *                               EACH vport. X MUST also be a power of 2.
+ * lpfc_debugfs_mask_disc_trc=Y  Where Y is an event mask as defined in
+ *                               lpfc_debugfs.h .
+ *
+ * slow_ring_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
+ * To enable the slow ring trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_slow_ring_trc=X   Where X is the event trace depth for
+ *                               the HBA. X MUST also be a power of 2.
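+ *
+ * For example, the discovery trace can be enabled at load time with
+ * illustrative values (any power of 2 works for the depth):
+ *   modprobe lpfc lpfc_debugfs_enable=1 lpfc_debugfs_max_disc_trc=64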
+ */
+static int lpfc_debugfs_enable = 1;
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_disc_trc;
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
+	"Set debugfs discovery trace depth");
+
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_slow_ring_trc;
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
+	"Set debugfs slow ring trace depth");
+
+static int lpfc_debugfs_mask_disc_trc;
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+	"Set debugfs discovery trace mask");
+
+#include <linux/debugfs.h>
+
+static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
+static unsigned long lpfc_debugfs_start_time = 0L;
+
+/* iDiag */
+static struct lpfc_idiag idiag;
+
+/**
+ * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
+ * @vport: The vport to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc discovery debugfs data from the @vport and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Discovery logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	int i, index, len, enable;
+	uint32_t ms;
+	struct lpfc_debugfs_trc *dtp;
+	char *buffer;
+
+	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	enable = lpfc_debugfs_enable;
+	lpfc_debugfs_enable = 0;
+
+	len = 0;
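+	/* disc_trc is a circular buffer; because the depth is a power of 2,
+	 * masking the running count yields the oldest entry, so the dump
+	 * starts there and wraps around to the newest entry.
+	 */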
+	index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+		(lpfc_debugfs_max_disc_trc - 1);
+	for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+		dtp = vport->disc_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+	for (i = 0; i < index; i++) {
+		dtp = vport->disc_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+
+	lpfc_debugfs_enable = enable;
+	kfree(buffer);
+
+	return len;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
+ * @phba: The HBA to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc slow ring debugfs data from the @phba and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Slow ring logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int i, index, len, enable;
+	uint32_t ms;
+	struct lpfc_debugfs_trc *dtp;
+	char *buffer;
+
+	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	enable = lpfc_debugfs_enable;
+	lpfc_debugfs_enable = 0;
+
+	len = 0;
+	index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
+		(lpfc_debugfs_max_slow_ring_trc - 1);
+	for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
+		dtp = phba->slow_ring_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+	for (i = 0; i < index; i++) {
+		dtp = phba->slow_ring_trc + i;
+		if (!dtp->fmt)
+			continue;
+		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+		snprintf(buffer,
+			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
+			dtp->seq_cnt, ms, dtp->fmt);
+		len +=  snprintf(buf+len, size-len, buffer,
+			dtp->data1, dtp->data2, dtp->data3);
+	}
+
+	lpfc_debugfs_enable = enable;
+	kfree(buffer);
+
+	return len;
+}
+
+static int lpfc_debugfs_last_hbq = -1;
+
+/**
+ * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the host buffer queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hbq state will be
+ * dumped to @buf first and then info on each hbq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hbq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured HBQ each time called.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int cnt, i, j, found, posted, low;
+	uint32_t phys, raw_index, getidx;
+	struct lpfc_hbq_init *hip;
+	struct hbq_s *hbqs;
+	struct lpfc_hbq_entry *hbqe;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *hbq_buf;
+
+	if (phba->sli_rev != 3)
+		return 0;
+	cnt = LPFC_HBQINFO_SIZE;
+	spin_lock_irq(&phba->hbalock);
+
+	/* toggle between multiple hbqs, if any */
+	i = lpfc_sli_hbq_count();
+	if (i > 1) {
+		 lpfc_debugfs_last_hbq++;
+		 if (lpfc_debugfs_last_hbq >= i)
+			lpfc_debugfs_last_hbq = 0;
+	}
+	else
+		lpfc_debugfs_last_hbq = 0;
+
+	i = lpfc_debugfs_last_hbq;
+
+	len +=  snprintf(buf+len, size-len, "HBQ %d Info\n", i);
+
+	hbqs =  &phba->hbqs[i];
+	posted = 0;
+	list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
+		posted++;
+
+	hip =  lpfc_hbq_defs[i];
+	len +=  snprintf(buf+len, size-len,
+		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
+		hip->hbq_index, hip->profile, hip->rn,
+		hip->buffer_count, hip->init_count, hip->add_count, posted);
+
+	raw_index = phba->hbq_get[i];
+	getidx = le32_to_cpu(raw_index);
+	len +=  snprintf(buf+len, size-len,
+		"entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
+		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
+
+	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
+	for (j=0; j<hbqs->entry_count; j++) {
+		len +=  snprintf(buf+len, size-len,
+			"%03d: %08x %04x %05x ", j,
+			le32_to_cpu(hbqe->bde.addrLow),
+			le32_to_cpu(hbqe->bde.tus.w),
+			le32_to_cpu(hbqe->buffer_tag));
+		i = 0;
+		found = 0;
+
+		/* First calculate if slot has an associated posted buffer */
+		low = hbqs->hbqPutIdx - posted;
+		if (low >= 0) {
+			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
+				len +=  snprintf(buf+len, size-len, "Unused\n");
+				goto skipit;
+			}
+		}
+		else {
+			if ((j >= hbqs->hbqPutIdx) &&
+				(j < (hbqs->entry_count+low))) {
+				len +=  snprintf(buf+len, size-len, "Unused\n");
+				goto skipit;
+			}
+		}
+
+		/* Get the Buffer info for the posted buffer */
+		list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
+			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
+				len +=  snprintf(buf+len, size-len,
+					"Buf%d: %p %06x\n", i,
+					hbq_buf->dbuf.virt, hbq_buf->tag);
+				found = 1;
+				break;
+			}
+			i++;
+		}
+		if (!found) {
+			len +=  snprintf(buf+len, size-len, "No DMAinfo?\n");
+		}
+skipit:
+		hbqe++;
+		if (len > LPFC_HBQINFO_SIZE - 54)
+			break;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return len;
+}
+
+static int lpfc_debugfs_last_hba_slim_off;
+
+/**
+ * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
+ * @phba: The HBA to gather SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of HBA SLIM for the HBA associated
+ * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
+ *
+ * Notes:
+ * This routine will only dump up to 1024 bytes of data each time called and
+ * should be called multiple times to dump the entire HBA SLIM.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf, which will not
+ * exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int i, off;
+	uint32_t *ptr;
+	char *buffer;
+
+	buffer = kmalloc(1024, GFP_KERNEL);
+	if (!buffer)
+		return 0;
+
+	off = 0;
+	spin_lock_irq(&phba->hbalock);
+
+	len +=  snprintf(buf+len, size-len, "HBA SLIM\n");
+	lpfc_memcpy_from_slim(buffer,
+		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
+
+	ptr = (uint32_t *)&buffer[0];
+	off = lpfc_debugfs_last_hba_slim_off;
+
+	/* Set it up for the next time */
+	lpfc_debugfs_last_hba_slim_off += 1024;
+	if (lpfc_debugfs_last_hba_slim_off >= 4096)
+		lpfc_debugfs_last_hba_slim_off = 0;
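+	/* The 4 KB SLIM region is walked in 1 KB windows across successive reads */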
+
+	i = 1024;
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	kfree(buffer);
+
+	return len;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
+ * @phba: The HBA to gather Host SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of host SLIM for the host associated
+ * with @phba to @buf up to @size bytes of data. The dump will contain the
+ * Mailbox, PCB, Rings, and Registers that are located in host memory.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf, which will not
+ * exceed @size.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
+{
+	int len = 0;
+	int i, off;
+	uint32_t word0, word1, word2, word3;
+	uint32_t *ptr;
+	struct lpfc_pgp *pgpp;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	off = 0;
+	spin_lock_irq(&phba->hbalock);
+
+	len +=  snprintf(buf+len, size-len, "SLIM Mailbox\n");
+	ptr = (uint32_t *)phba->slim2p.virt;
+	i = sizeof(MAILBOX_t);
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	len +=  snprintf(buf+len, size-len, "SLIM PCB\n");
+	ptr = (uint32_t *)phba->pcb;
+	i = sizeof(PCB_t);
+	while (i > 0) {
+		len +=  snprintf(buf+len, size-len,
+		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
+		*(ptr+5), *(ptr+6), *(ptr+7));
+		ptr += 8;
+		i -= (8 * sizeof(uint32_t));
+		off += (8 * sizeof(uint32_t));
+	}
+
+	for (i = 0; i < 4; i++) {
+		pgpp = &phba->port_gp[i];
+		pring = &psli->ring[i];
+		len +=  snprintf(buf+len, size-len,
+				 "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
+				 "Local:%d flg:x%x)  RSP PutInx:%d Max:%d\n",
+				 i, pgpp->cmdGetInx, pring->numCiocb,
+				 pring->next_cmdidx, pring->local_getidx,
+				 pring->flag, pgpp->rspPutInx, pring->numRiocb);
+	}
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
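+		/*
+		 * HA/CA/HS/HC: Host Attention, Chip Attention, Host Status
+		 * and Host Control registers (SLI-3 register set).
+		 */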
+		word0 = readl(phba->HAregaddr);
+		word1 = readl(phba->CAregaddr);
+		word2 = readl(phba->HSregaddr);
+		word3 = readl(phba->HCregaddr);
+		len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+				 "HC:%08x\n", word0, word1, word2, word3);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return len;
+}
+
+/**
+ * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current target node list associated with @vport to
+ * @buf up to @size bytes of data. Each node entry in the dump will contain a
+ * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
+ *
+ * Return Value:
+ * This routine returns the number of bytes dumped into @buf, which will not
+ * exceed @size.
+ **/
+static int
+lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+{
+	int len = 0;
+	int cnt;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+	unsigned char *statep, *name;
+
+	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
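+	/*
+	 * cnt is how many node entries fit in the dump buffer; any nodes
+	 * beyond that are flagged below as missing.
+	 */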
+
+	spin_lock_irq(shost->host_lock);
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!cnt) {
+			len +=  snprintf(buf+len, size-len,
+				"Missing Nodelist Entries\n");
+			break;
+		}
+		cnt--;
+		switch (ndlp->nlp_state) {
+		case NLP_STE_UNUSED_NODE:
+			statep = "UNUSED";
+			break;
+		case NLP_STE_PLOGI_ISSUE:
+			statep = "PLOGI ";
+			break;
+		case NLP_STE_ADISC_ISSUE:
+			statep = "ADISC ";
+			break;
+		case NLP_STE_REG_LOGIN_ISSUE:
+			statep = "REGLOG";
+			break;
+		case NLP_STE_PRLI_ISSUE:
+			statep = "PRLI  ";
+			break;
+		case NLP_STE_UNMAPPED_NODE:
+			statep = "UNMAP ";
+			break;
+		case NLP_STE_MAPPED_NODE:
+			statep = "MAPPED";
+			break;
+		case NLP_STE_NPR_NODE:
+			statep = "NPR   ";
+			break;
+		default:
+			statep = "UNKNOWN";
+		}
+		len +=  snprintf(buf+len, size-len, "%s DID:x%06x ",
+			statep, ndlp->nlp_DID);
+		name = (unsigned char *)&ndlp->nlp_portname;
+		len +=  snprintf(buf+len, size-len,
+			"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+			*name, *(name+1), *(name+2), *(name+3),
+			*(name+4), *(name+5), *(name+6), *(name+7));
+		name = (unsigned char *)&ndlp->nlp_nodename;
+		len +=  snprintf(buf+len, size-len,
+			"WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+			*name, *(name+1), *(name+2), *(name+3),
+			*(name+4), *(name+5), *(name+6), *(name+7));
+		len +=  snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
+			ndlp->nlp_rpi, ndlp->nlp_flag);
+		if (!ndlp->nlp_type)
+			len +=  snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+		if (ndlp->nlp_type & NLP_FC_NODE)
+			len +=  snprintf(buf+len, size-len, "FC_NODE ");
+		if (ndlp->nlp_type & NLP_FABRIC)
+			len +=  snprintf(buf+len, size-len, "FABRIC ");
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			len +=  snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+				ndlp->nlp_sid);
+		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+			len +=  snprintf(buf+len, size-len, "FCP_INITIATOR ");
+		len += snprintf(buf+len, size-len, "usgmap:%x ",
+			ndlp->nlp_usg_map);
+		len += snprintf(buf+len, size-len, "refcnt:%x",
+			atomic_read(&ndlp->kref.refcount));
+		len +=  snprintf(buf+len, size-len, "\n");
+	}
+	spin_unlock_irq(shost->host_lock);
+	return len;
+}
+#endif
+
+/**
+ * lpfc_debugfs_disc_trc - Store discovery trace log
+ * @vport: The vport to associate this trace string with for retrieval.
+ * @mask: Log entry classification.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. Only entries with a @mask that
+ * match the current debugfs discovery mask will be saved. Entries that do not
+ * match will be thrown away. @fmt, @data1, @data2, and @data3 are used like
+ * printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+	uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_debugfs_trc *dtp;
+	int index;
+
+	if (!(lpfc_debugfs_mask_disc_trc & mask))
+		return;
+
+	if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
+		!vport || !vport->disc_trc)
+		return;
+
+	index = atomic_inc_return(&vport->disc_trc_cnt) &
+		(lpfc_debugfs_max_disc_trc - 1);
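+	/*
+	 * The AND acts as a cheap modulo; this assumes
+	 * lpfc_debugfs_max_disc_trc has been rounded up to a power of two.
+	 */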
+	dtp = vport->disc_trc + index;
+	dtp->fmt = fmt;
+	dtp->data1 = data1;
+	dtp->data2 = data2;
+	dtp->data3 = data3;
+	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->jif = jiffies;
+#endif
+	return;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc - Store slow ring trace log
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
+	uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_debugfs_trc *dtp;
+	int index;
+
+	if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc ||
+		!phba || !phba->slow_ring_trc)
+		return;
+
+	index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
+		(lpfc_debugfs_max_slow_ring_trc - 1);
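+	/* Same power-of-two masking as the discovery trace index above */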
+	dtp = phba->slow_ring_trc + index;
+	dtp->fmt = fmt;
+	dtp->data1 = data1;
+	dtp->data2 = data2;
+	dtp->data3 = data3;
+	dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
+	dtp->jif = jiffies;
+#endif
+	return;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_debugfs_disc_trc_open - Open the discovery trace log
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int size;
+	int rc = -ENOMEM;
+
+	if (!lpfc_debugfs_max_disc_trc) {
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	size =  (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+	size = PAGE_ALIGN(size);
+
+	debug->buffer = kmalloc(size, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this HBA, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int size;
+	int rc = -ENOMEM;
+
+	if (!lpfc_debugfs_max_slow_ring_trc) {
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Round to page boundary */
+	size =  (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
+	size = PAGE_ALIGN(size);
+
+	debug->buffer = kmalloc(size, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the dump, fills the buffer with the HBQ info for this HBA, and then
+ * returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate a fixed-size buffer for the HBQ info dump */
+	debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer,
+		LPFC_HBQINFO_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the dump, fills the buffer with the HBA SLIM contents for this HBA, and
+ * then returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate a fixed-size buffer for the HBA SLIM dump */
+	debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer,
+		LPFC_DUMPHBASLIM_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the phba from the i_private field in @inode, allocates the necessary buffer
+ * for the dump, fills the buffer with the host SLIM contents for this HBA, and
+ * then returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_hba *phba = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate a fixed-size buffer for the host SLIM dump */
+	debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer,
+		LPFC_DUMPHOSTSLIM_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_data)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Use the preallocated BlockGuard data dump buffer */
+	printk(KERN_ERR "9059 BLKGRD:  %s: _dump_buf_data=0x%p\n",
+			__func__, _dump_buf_data);
+	debug->buffer = _dump_buf_data;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
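+	/* The preallocated dump buffer spans (1 << _dump_buf_data_order) pages */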
+	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static int
+lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	if (!_dump_buf_dif)
+		return -EBUSY;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Use the preallocated BlockGuard DIF dump buffer */
+	printk(KERN_ERR	"9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n",
+		__func__, _dump_buf_dif, file->f_dentry->d_name.name);
+	debug->buffer = _dump_buf_dif;
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+static ssize_t
+lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
+		  size_t nbytes, loff_t *ppos)
+{
+	/*
+	 * The Data/DIF buffers only save one failing IO.
+	 * The write op is used as a reset mechanism after an IO has
+	 * already been saved so the next one can be saved.
+	 */
+	spin_lock(&_dump_buf_lock);
+
+	memset((void *)_dump_buf_data, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_data_order));
+	memset((void *)_dump_buf_dif, 0,
+			((1 << PAGE_SHIFT) << _dump_buf_dif_order));
+
+	_dump_buf_done = 0;
+
+	spin_unlock(&_dump_buf_lock);
+
+	return nbytes;
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
+	size_t nbytes, loff_t *ppos)
+{
+	struct dentry *dent = file->f_dentry;
+	struct lpfc_hba *phba = file->private_data;
+	char cbuf[32];
+	uint64_t tmp = 0;
+	int cnt = 0;
+
+	if (dent == phba->debug_writeGuard)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
+	else if (dent == phba->debug_writeApp)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
+	else if (dent == phba->debug_writeRef)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
+	else if (dent == phba->debug_readGuard)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
+	else if (dent == phba->debug_readApp)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
+	else if (dent == phba->debug_readRef)
+		cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
+	else if (dent == phba->debug_InjErrNPortID)
+		cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
+	else if (dent == phba->debug_InjErrWWPN) {
+		memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
+		tmp = cpu_to_be64(tmp);
+		cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
+	} else if (dent == phba->debug_InjErrLBA) {
+		if (phba->lpfc_injerr_lba == (sector_t)(-1))
+			cnt = snprintf(cbuf, 32, "off\n");
+		else
+			cnt = snprintf(cbuf, 32, "0x%llx\n",
+				 (uint64_t) phba->lpfc_injerr_lba);
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			 "0547 Unknown debugfs error injection entry\n");
+
+	return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
+}
+
+static ssize_t
+lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
+	size_t nbytes, loff_t *ppos)
+{
+	struct dentry *dent = file->f_dentry;
+	struct lpfc_hba *phba = file->private_data;
+	char dstbuf[32];
+	uint64_t tmp = 0;
+	int size;
+
+	memset(dstbuf, 0, sizeof(dstbuf));
+	/* Leave room for a terminating NUL so kstrtoull() sees a string */
+	size = (nbytes < sizeof(dstbuf) - 1) ? nbytes : sizeof(dstbuf) - 1;
+	if (copy_from_user(dstbuf, buf, size))
+		return -EFAULT;
+
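+	/* Writing the string "off" disables LBA-based error injection */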
+	if (dent == phba->debug_InjErrLBA) {
+		if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') && (dstbuf[2] == 'f'))
+			tmp = (uint64_t)(-1);
+	}
+
+	if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
+		return 0;
+
+	if (dent == phba->debug_writeGuard)
+		phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_writeApp)
+		phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_writeRef)
+		phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readGuard)
+		phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readApp)
+		phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_readRef)
+		phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
+	else if (dent == phba->debug_InjErrLBA)
+		phba->lpfc_injerr_lba = (sector_t)tmp;
+	else if (dent == phba->debug_InjErrNPortID)
+		phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
+	else if (dent == phba->debug_InjErrWWPN) {
+		tmp = cpu_to_be64(tmp);
+		memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			 "0548 Unknown debugfs error injection entry\n");
+
+	return nbytes;
+}
+
+static int
+lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/**
+ * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_vport *vport = inode->i_private;
+	struct lpfc_debug *debug;
+	int rc = -ENOMEM;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		goto out;
+
+	/* Allocate a fixed-size buffer for the node list dump */
+	debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
+	if (!debug->buffer) {
+		kfree(debug);
+		goto out;
+	}
+
+	debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
+		LPFC_NODELIST_SIZE);
+	file->private_data = debug;
+
+	rc = 0;
+out:
+	return rc;
+}
+
+/**
+ * lpfc_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @off: The offset to seek to or the amount to seek by.
+ * @whence: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation. The
+ * @whence parameter indicates whether @off is the offset to directly seek to,
+ * or if it is a value to seek forward or reverse by. This function figures out
+ * what the new offset of the debugfs file will be and assigns that value to the
+ * f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ **/
+static loff_t
+lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+	struct lpfc_debug *debug;
+	loff_t pos = -1;
+
+	debug = file->private_data;
+
+	switch (whence) {
+	case SEEK_SET:
+		pos = off;
+		break;
+	case SEEK_CUR:
+		pos = file->f_pos + off;
+		break;
+	case SEEK_END:
+		pos = debug->len - off;
+		break;
+	}
+	return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
+}
+
+/**
+ * lpfc_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @ppos and copy up to @nbytes of
+ * data to @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_debugfs_read(struct file *file, char __user *buf,
+		  size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
+				       debug->len);
+}
+
+/**
+ * lpfc_debugfs_release - Release the buffer used to store debugfs file data
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file was
+ * opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+static int
+lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	debug->buffer = NULL;
+	kfree(debug);
+
+	return 0;
+}
+
+/*
+ * ---------------------------------
+ * iDiag debugfs file access methods
+ * ---------------------------------
+ *
+ * All access methods are through the proper SLI4 PCI function's debugfs
+ * iDiag directory:
+ *
+ *     /sys/kernel/debug/lpfc/fn<#>/iDiag
+ */
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses it
+ * for the idiag command and its arguments. White space between fields is
+ * used as the parsing separator.
+ *
+ * This routine returns the number of arguments parsed when successful; in
+ * error conditions it returns a proper error code back to user space.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+			      struct lpfc_idiag_cmd *idiag_cmd)
+{
+	char mybuf[64];
+	char *pbuf, *step_str;
+	int i;
+	size_t bsize;
+
+	/* Protect copy from user */
+	if (!access_ok(VERIFY_READ, buf, nbytes))
+		return -EFAULT;
+
+	memset(mybuf, 0, sizeof(mybuf));
+	memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+	bsize = min(nbytes, (sizeof(mybuf)-1));
+
+	if (copy_from_user(mybuf, buf, bsize))
+		return -EFAULT;
+	pbuf = &mybuf[0];
+	step_str = strsep(&pbuf, "\t ");
+
+	/* The opcode must be present */
+	if (!step_str)
+		return -EINVAL;
+
+	idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+	if (idiag_cmd->opcode == 0)
+		return -EINVAL;
+
+	for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+		step_str = strsep(&pbuf, "\t ");
+		if (!step_str)
+			return i;
+		idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+	}
+	return i;
+}
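+
+/*
+ * An iDiag command written to one of the debugfs files below is plain text of
+ * the form "<opcode> <arg0> <arg1> ...", with fields separated by spaces or
+ * tabs and parsed by lpfc_idiag_cmd_get() above. Illustrative note: the
+ * opcode values and the argument count expected for each command are defined
+ * in lpfc_debugfs.h; see that header for the exact values.
+ */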
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, it then
+ * allocates buffer for the file operation, performs the necessary PCI config
+ * space read into the allocated buffer according to the idiag user command
+ * setup, and then returns a pointer to buffer in the private_data field in
+ * @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug;
+
+	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->i_private = inode->i_private;
+	debug->buffer = NULL;
+	file->private_data = debug;
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation, it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	/* Free the buffers to the file operation */
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct in the
+ * case of a write command.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+	struct lpfc_debug *debug = file->private_data;
+
+	if (debug->op == LPFC_IDIAG_OP_WR) {
+		switch (idiag.cmd.opcode) {
+		case LPFC_IDIAG_CMD_PCICFG_WR:
+		case LPFC_IDIAG_CMD_PCICFG_ST:
+		case LPFC_IDIAG_CMD_PCICFG_CL:
+		case LPFC_IDIAG_CMD_QUEACC_WR:
+		case LPFC_IDIAG_CMD_QUEACC_ST:
+		case LPFC_IDIAG_CMD_QUEACC_CL:
+			memset(&idiag, 0, sizeof(idiag));
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Free the buffers to the file operation */
+	kfree(debug->buffer);
+	kfree(debug);
+
+	return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies to user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits), or it browses through
+ * all registers of the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+	int where, count;
+	char *pbuffer;
+	struct pci_dev *pdev;
+	uint32_t u32val;
+	uint16_t u16val;
+	uint8_t u8val;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
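+	/*
+	 * The command to execute here was staged by a prior write to this
+	 * debugfs file (see lpfc_idiag_pcicfg_write below).
+	 */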
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+	} else
+		return 0;
+
+	/* Read single PCI config space register */
+	switch (count) {
+	case SIZE_U8: /* byte (8 bits) */
+		pci_read_config_byte(pdev, where, &u8val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %02x\n", where, u8val);
+		break;
+	case SIZE_U16: /* word (16 bits) */
+		pci_read_config_word(pdev, where, &u16val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %04x\n", where, u16val);
+		break;
+	case SIZE_U32: /* double word (32 bits) */
+		pci_read_config_dword(pdev, where, &u32val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%03x: %08x\n", where, u32val);
+		break;
+	case LPFC_PCI_CFG_BROWSE: /* browse all */
+		goto pcicfg_browse;
+	default:
+		/* illegal count */
+		len = 0;
+		break;
+	}
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+	/* Browse all PCI config space registers */
+	offset_label = idiag.offset.last_rd;
+	offset = offset_label;
+
+	/* Read PCI config space */
+	len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+			"%03x: ", offset_label);
+	while (index > 0) {
+		pci_read_config_dword(pdev, offset, &u32val);
+		len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+				"%08x ", u32val);
+		offset += sizeof(uint32_t);
+		if (offset >= LPFC_PCI_CFG_SIZE) {
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_CFG_SIZE-len, "\n");
+			break;
+		}
+		index -= sizeof(uint32_t);
+		if (!index)
+			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+					"\n");
+		else if (!(index % (8 * sizeof(uint32_t)))) {
+			offset_label += (8 * sizeof(uint32_t));
+			len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+					"\n%03x: ", offset_label);
+		}
+	}
+
+	/* Set up the offset for next portion of pci cfg read */
+	if (index == 0) {
+		idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+		if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+			idiag.offset.last_rd = 0;
+	} else
+		idiag.offset.last_rd = 0;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for a PCI config space read or write command
+ * accordingly. In the case of a PCI config space read command, it sets up
+ * the command in the idiag command struct for the debugfs read operation.
+ * In the case of a PCI config space write operation, it executes the write
+ * operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t where, value, count;
+	uint32_t u32val;
+	uint16_t u16val;
+	uint8_t u8val;
+	struct pci_dev *pdev;
+	int rc;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return -EFAULT;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+		/* Sanity check on PCI config read command line arguments */
+		if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
+			goto error_out;
+		/* Read command from PCI config space, set up command fields */
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+		if (count == LPFC_PCI_CFG_BROWSE) {
+			if (where % sizeof(uint32_t))
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = where;
+		} else if ((count != sizeof(uint8_t)) &&
+			   (count != sizeof(uint16_t)) &&
+			   (count != sizeof(uint32_t)))
+			goto error_out;
+		if (count == sizeof(uint8_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+				goto error_out;
+			if (where % sizeof(uint8_t))
+				goto error_out;
+		}
+		if (count == sizeof(uint16_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+				goto error_out;
+			if (where % sizeof(uint16_t))
+				goto error_out;
+		}
+		if (count == sizeof(uint32_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+				goto error_out;
+			if (where % sizeof(uint32_t))
+				goto error_out;
+		}
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+		/* Sanity check on PCI config write command line arguments */
+		if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
+			goto error_out;
+		/* Write command to PCI config space, read-modify-write */
+		where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+		count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+		value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
+		/* Sanity checks */
+		if ((count != sizeof(uint8_t)) &&
+		    (count != sizeof(uint16_t)) &&
+		    (count != sizeof(uint32_t)))
+			goto error_out;
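+		/*
+		 * For each access size: WR writes the value directly, ST ORs
+		 * the bits in, CL clears them (read-modify-write).
+		 */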
+		if (count == sizeof(uint8_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+				goto error_out;
+			if (where % sizeof(uint8_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_byte(pdev, where,
+						      (uint8_t)value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_byte(pdev, where, &u8val);
+				if (!rc) {
+					u8val |= (uint8_t)value;
+					pci_write_config_byte(pdev, where,
+							      u8val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_byte(pdev, where, &u8val);
+				if (!rc) {
+					u8val &= (uint8_t)(~value);
+					pci_write_config_byte(pdev, where,
+							      u8val);
+				}
+			}
+		}
+		if (count == sizeof(uint16_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+				goto error_out;
+			if (where % sizeof(uint16_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_word(pdev, where,
+						      (uint16_t)value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_word(pdev, where, &u16val);
+				if (!rc) {
+					u16val |= (uint16_t)value;
+					pci_write_config_word(pdev, where,
+							      u16val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_word(pdev, where, &u16val);
+				if (!rc) {
+					u16val &= (uint16_t)(~value);
+					pci_write_config_word(pdev, where,
+							      u16val);
+				}
+			}
+		}
+		if (count == sizeof(uint32_t)) {
+			if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+				goto error_out;
+			if (where % sizeof(uint32_t))
+				goto error_out;
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+				pci_write_config_dword(pdev, where, value);
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+				rc = pci_read_config_dword(pdev, where,
+							   &u32val);
+				if (!rc) {
+					u32val |= value;
+					pci_write_config_dword(pdev, where,
+							       u32val);
+				}
+			}
+			if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+				rc = pci_read_config_dword(pdev, where,
+							   &u32val);
+				if (!rc) {
+					u32val &= ~value;
+					pci_write_config_dword(pdev, where,
+							       u32val);
+				}
+			}
+		}
+	} else
+		/* All other opcodes are illegal for now */
+		goto error_out;
+
+	return nbytes;
+error_out:
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci bar memory mapped space
+ * according to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int offset_label, offset, offset_run, len = 0, index;
+	int bar_num, acc_range, bar_size;
+	char *pbuffer;
+	void __iomem *mem_mapped_bar;
+	uint32_t if_type;
+	struct pci_dev *pdev;
+	uint32_t u32val;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+		bar_num   = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+		offset    = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+		acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+		bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+	} else
+		return 0;
+
+	if (acc_range == 0)
+		return 0;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (bar_num == IDIAG_BARACC_BAR_0)
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		else if (bar_num == IDIAG_BARACC_BAR_1)
+			mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+		else if (bar_num == IDIAG_BARACC_BAR_2)
+			mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+		else
+			return 0;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num == IDIAG_BARACC_BAR_0)
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		else
+			return 0;
+	} else
+		return 0;
+
+	/* Read single PCI bar space register */
+	if (acc_range == SINGLE_WORD) {
+		offset_run = offset;
+		u32val = readl(mem_mapped_bar + offset_run);
+		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+				"%05x: %08x\n", offset_run, u32val);
+	} else
+		goto baracc_browse;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+baracc_browse:
+
+	/* Browse all PCI bar space registers */
+	offset_label = idiag.offset.last_rd;
+	offset_run = offset_label;
+
+	/* Read PCI bar memory mapped space */
+	len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+			"%05x: ", offset_label);
+	index = LPFC_PCI_BAR_RD_SIZE;
+	while (index > 0) {
+		u32val = readl(mem_mapped_bar + offset_run);
+		len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+				"%08x ", u32val);
+		offset_run += sizeof(uint32_t);
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (offset_run >= bar_size) {
+				len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+				break;
+			}
+		} else {
+			if (offset_run >= offset +
+			    (acc_range * sizeof(uint32_t))) {
+				len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+				break;
+			}
+		}
+		index -= sizeof(uint32_t);
+		if (!index)
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+		else if (!(index % (8 * sizeof(uint32_t)))) {
+			offset_label += (8 * sizeof(uint32_t));
+			len += snprintf(pbuffer+len,
+					LPFC_PCI_BAR_RD_BUF_SIZE-len,
+					"\n%05x: ", offset_label);
+		}
+	}
+
+	/* Set up the offset for next portion of pci bar read */
+	if (index == 0) {
+		idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (idiag.offset.last_rd >= bar_size)
+				idiag.offset.last_rd = 0;
+		} else {
+			if (offset_run >= offset +
+			    (acc_range * sizeof(uint32_t)))
+				idiag.offset.last_rd = offset;
+		}
+	} else {
+		if (acc_range == LPFC_PCI_BAR_BROWSE)
+			idiag.offset.last_rd = 0;
+		else
+			idiag.offset.last_rd = offset;
+	}
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for a PCI BAR memory mapped space read or
+ * write command accordingly. In the case of a PCI BAR memory mapped space
+ * read command, it sets up the command in the idiag command struct for
+ * the debugfs read operation. In the case of a PCI BAR memory mapped space
+ * write operation, it executes the write operation into the PCI BAR memory
+ * mapped space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t bar_num, bar_size, offset, value, acc_range;
+	struct pci_dev *pdev;
+	void __iomem *mem_mapped_bar;
+	uint32_t if_type;
+	uint32_t u32val;
+	int rc;
+
+	pdev = phba->pcidev;
+	if (!pdev)
+		return -EFAULT;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if ((bar_num != IDIAG_BARACC_BAR_0) &&
+		    (bar_num != IDIAG_BARACC_BAR_1) &&
+		    (bar_num != IDIAG_BARACC_BAR_2))
+			goto error_out;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num != IDIAG_BARACC_BAR_0)
+			goto error_out;
+	} else
+		goto error_out;
+
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (bar_num == IDIAG_BARACC_BAR_0) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR0_SIZE;
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		} else if (bar_num == IDIAG_BARACC_BAR_1) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR1_SIZE;
+			mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+		} else if (bar_num == IDIAG_BARACC_BAR_2) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF0_BAR2_SIZE;
+			mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+		} else
+			goto error_out;
+	} else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		if (bar_num == IDIAG_BARACC_BAR_0) {
+			idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+				LPFC_PCI_IF2_BAR0_SIZE;
+			mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+		} else
+			goto error_out;
+	} else
+		goto error_out;
+
+	offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+	if (offset % sizeof(uint32_t))
+		goto error_out;
+
+	bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+		/* Sanity check on PCI config read command line arguments */
+		if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
+			goto error_out;
+		acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+		if (acc_range == LPFC_PCI_BAR_BROWSE) {
+			if (offset > bar_size - sizeof(uint32_t))
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = offset;
+		} else if (acc_range > SINGLE_WORD) {
+			if (offset + acc_range * sizeof(uint32_t) > bar_size)
+				goto error_out;
+			/* Starting offset to browse */
+			idiag.offset.last_rd = offset;
+		} else if (acc_range != SINGLE_WORD)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
+		   idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+		/* Sanity check on PCI bar write command line arguments */
+		if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
+			goto error_out;
+		/* Write command to PCI bar space, read-modify-write */
+		acc_range = SINGLE_WORD;
+		value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
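+		/*
+		 * WR stores the value, ST sets bits, CL clears bits; the
+		 * readl() after each writel() flushes the posted MMIO write.
+		 */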
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
+			writel(value, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
+			u32val = readl(mem_mapped_bar + offset);
+			u32val |= value;
+			writel(u32val, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+			u32val = readl(mem_mapped_bar + offset);
+			u32val &= ~value;
+			writel(u32val, mem_mapped_bar + offset);
+			readl(mem_mapped_bar + offset);
+		}
+	} else
+		/* All other opcodes are illegal for now */
+		goto error_out;
+
+	return nbytes;
+error_out:
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba SLI4 PCI function queue information,
+ * and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+			loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	int len = 0, fcp_qidx;
+	char *pbuffer;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	/* Get slow-path event queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path EQ information:\n");
+	if (phba->sli4_hba.sp_eq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tEQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
+			phba->sli4_hba.sp_eq->queue_id,
+			phba->sli4_hba.sp_eq->entry_count,
+			phba->sli4_hba.sp_eq->entry_size,
+			phba->sli4_hba.sp_eq->host_index,
+			phba->sli4_hba.sp_eq->hba_index);
+	}
+
+	/* Get fast-path event queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Fast-path EQ information:\n");
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++) {
+			if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tEQID[%02d], "
+				"QE-COUNT[%04d], QE-SIZE[%04d], "
+				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
+				phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
+				phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
+				phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
+				phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
+				phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+			}
+		}
+	}
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+	/* Get mailbox complete queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path MBX CQ information:\n");
+	if (phba->sli4_hba.mbx_cq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Associated EQID[%02d]:\n",
+			phba->sli4_hba.mbx_cq->assoc_qid);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tCQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
+			phba->sli4_hba.mbx_cq->queue_id,
+			phba->sli4_hba.mbx_cq->entry_count,
+			phba->sli4_hba.mbx_cq->entry_size,
+			phba->sli4_hba.mbx_cq->host_index,
+			phba->sli4_hba.mbx_cq->hba_index);
+	}
+
+	/* Get slow-path complete queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path ELS CQ information:\n");
+	if (phba->sli4_hba.els_cq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Associated EQID[%02d]:\n",
+			phba->sli4_hba.els_cq->assoc_qid);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tCQID [%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
+			phba->sli4_hba.els_cq->queue_id,
+			phba->sli4_hba.els_cq->entry_count,
+			phba->sli4_hba.els_cq->entry_size,
+			phba->sli4_hba.els_cq->host_index,
+			phba->sli4_hba.els_cq->hba_index);
+	}
+
+	/* Get fast-path complete queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Fast-path FCP CQ information:\n");
+	fcp_qidx = 0;
+	if (phba->sli4_hba.fcp_cq) {
+		do {
+			if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"Associated EQID[%02d]:\n",
+				phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+				len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tCQID[%02d], "
+				"QE-COUNT[%04d], QE-SIZE[%04d], "
+				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
+				phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
+				phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
+				phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
+				phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
+				phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+			}
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+		len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+	}
+
+	/* Get mailbox queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path MBX MQ information:\n");
+	if (phba->sli4_hba.mbx_wq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Associated CQID[%02d]:\n",
+			phba->sli4_hba.mbx_wq->assoc_qid);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tWQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
+			phba->sli4_hba.mbx_wq->queue_id,
+			phba->sli4_hba.mbx_wq->entry_count,
+			phba->sli4_hba.mbx_wq->entry_size,
+			phba->sli4_hba.mbx_wq->host_index,
+			phba->sli4_hba.mbx_wq->hba_index);
+	}
+
+	/* Get slow-path work queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path ELS WQ information:\n");
+	if (phba->sli4_hba.els_wq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Associated CQID[%02d]:\n",
+			phba->sli4_hba.els_wq->assoc_qid);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tWQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
+			phba->sli4_hba.els_wq->queue_id,
+			phba->sli4_hba.els_wq->entry_count,
+			phba->sli4_hba.els_wq->entry_size,
+			phba->sli4_hba.els_wq->host_index,
+			phba->sli4_hba.els_wq->hba_index);
+	}
+
+	/* Get fast-path work queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Fast-path FCP WQ information:\n");
+	if (phba->sli4_hba.fcp_wq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++) {
+			if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+				continue;
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"Associated CQID[%02d]:\n",
+				phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_INFO_GET_BUF_SIZE-len,
+				"\tWQID[%02d], "
+				"QE-COUNT[%04d], WQE-SIZE[%04d], "
+				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
+				phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
+				phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
+				phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
+				phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
+				phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+		}
+		len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+	}
+
+	/* Get receive queue information */
+	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Slow-path RQ information:\n");
+	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"Associated CQID[%02d]:\n",
+			phba->sli4_hba.hdr_rq->assoc_qid);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tHQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
+			phba->sli4_hba.hdr_rq->queue_id,
+			phba->sli4_hba.hdr_rq->entry_count,
+			phba->sli4_hba.hdr_rq->entry_size,
+			phba->sli4_hba.hdr_rq->host_index,
+			phba->sli4_hba.hdr_rq->hba_index);
+		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tDQID[%02d], "
+			"QE-COUNT[%04d], QE-SIZE[%04d], "
+			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
+			phba->sli4_hba.dat_rq->queue_id,
+			phba->sli4_hba.dat_rq->entry_count,
+			phba->sli4_hba.dat_rq->entry_size,
+			phba->sli4_hba.dat_rq->host_index,
+			phba->sli4_hba.dat_rq->hba_index);
+	}
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_que_param_check - queue access command parameter sanity check
+ * @q: The pointer to queue structure.
+ * @index: The index into a queue entry.
+ * @count: The number of queue entries to access.
+ *
+ * Description:
+ * The routine performs sanity check on device queue access method commands.
+ *
+ * Returns:
+ * This function returns -EINVAL when fails the sanity check, otherwise, it
+ * returns 0.
+ **/
+static int
+lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
+{
+	/* Only support single entry read or browsing */
+	if ((count != 1) && (count != LPFC_QUE_ACC_BROWSE))
+		return -EINVAL;
+	if (index > q->entry_count - 1)
+		return -EINVAL;
+	return 0;
+}
+
+/**
+ * lpfc_idiag_queacc_read_qe - read a single entry from the given queue index
+ * @pbuffer: The pointer to buffer to copy the read data into.
+ * @len: The length of data already in @pbuffer, after which to append.
+ * @pque: The pointer to the queue to be read.
+ * @index: The index into the queue entry.
+ *
+ * Description:
+ * This routine reads out a single entry from the given queue's index location
+ * and copies it into the buffer provided.
+ *
+ * Returns:
+ * This function returns 0 when it fails, otherwise, it returns the length of
+ * the data read into the buffer provided.
+ **/
+static int
+lpfc_idiag_queacc_read_qe(char *pbuffer, int len, struct lpfc_queue *pque,
+			  uint32_t index)
+{
+	int offset, esize;
+	uint32_t *pentry;
+
+	if (!pbuffer || !pque)
+		return 0;
+
+	esize = pque->entry_size;
+	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+			"QE-INDEX[%04d]:\n", index);
+
+	offset = 0;
+	pentry = pque->qe[index].address;
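+	/* Dump the queue entry as 32-bit words, four words per output line */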
+	while (esize > 0) {
+		len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len,
+				"%08x ", *pentry);
+		pentry++;
+		offset += sizeof(uint32_t);
+		esize -= sizeof(uint32_t);
+		if (esize > 0 && !(offset % (4 * sizeof(uint32_t))))
+			len += snprintf(pbuffer+len,
+					LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+	}
+	len += snprintf(pbuffer+len, LPFC_QUE_ACC_BUF_SIZE-len, "\n");
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_queacc_read - idiag debugfs read port queue
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device queue memory according to the
+ * idiag command, and copies to user @buf. Depending on the queue dump read
+ * command setup, it does either a single queue entry read or browsing through
+ * all entries of the queue.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t last_index, index, count;
+	struct lpfc_queue *pque = NULL;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_QUE_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+		count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+		pque = (struct lpfc_queue *)idiag.ptr_private;
+	} else
+		return 0;
+
+	/* Browse the queue starting from index */
+	if (count == LPFC_QUE_ACC_BROWSE)
+		goto que_browse;
+
+	/* Read a single entry from the queue */
+	len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+que_browse:
+
+	/* Browse all entries from the queue */
+	last_index = idiag.offset.last_rd;
+	index = last_index;
+
+	while (len < LPFC_QUE_ACC_SIZE - pque->entry_size) {
+		len = lpfc_idiag_queacc_read_qe(pbuffer, len, pque, index);
+		index++;
+		if (index > pque->entry_count - 1)
+			break;
+	}
+
+	/* Set up the offset for the next portion of the queue browse */
+	if (index > pque->entry_count - 1)
+		index = 0;
+	idiag.offset.last_rd = index;
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_queacc_write - Syntax check and set up idiag queacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a port queue read (dump) or write (set)
+ * command accordingly. For a port queue read command, it sets up the command
+ * in the idiag command struct for the following debugfs read operation. For
+ * a port queue write operation, it executes the write operation on the port
+ * queue entry accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t qidx, quetp, queid, index, count, offset, value;
+	uint32_t *pentry;
+	struct lpfc_queue *pque;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Get and sanity check the command fields */
+	quetp  = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
+	queid  = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
+	index  = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+	count  = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+	offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
+	value  = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
+
+	/* Sanity check on command line arguments */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+		if (rc != LPFC_QUE_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (count != 1)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		if (rc != LPFC_QUE_ACC_RD_CMD_ARG)
+			goto error_out;
+	} else
+		goto error_out;
+
+	switch (quetp) {
+	case LPFC_IDIAG_EQ:
+		/* Slow-path event queue */
+		if (phba->sli4_hba.sp_eq &&
+		    phba->sli4_hba.sp_eq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.sp_eq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.sp_eq;
+			goto pass_check;
+		}
+		/* Fast-path event queue */
+		if (phba->sli4_hba.fp_eq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+				if (phba->sli4_hba.fp_eq[qidx] &&
+				    phba->sli4_hba.fp_eq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						phba->sli4_hba.fp_eq[qidx],
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.fp_eq[qidx];
+					goto pass_check;
+				}
+			}
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_CQ:
+		/* MBX complete queue */
+		if (phba->sli4_hba.mbx_cq &&
+		    phba->sli4_hba.mbx_cq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.mbx_cq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.mbx_cq;
+			goto pass_check;
+		}
+		/* ELS complete queue */
+		if (phba->sli4_hba.els_cq &&
+		    phba->sli4_hba.els_cq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.els_cq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.els_cq;
+			goto pass_check;
+		}
+		/* FCP complete queue */
+		if (phba->sli4_hba.fcp_cq) {
+			qidx = 0;
+			do {
+				if (phba->sli4_hba.fcp_cq[qidx] &&
+				    phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						phba->sli4_hba.fcp_cq[qidx],
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.fcp_cq[qidx];
+					goto pass_check;
+				}
+			} while (++qidx < phba->cfg_fcp_eq_count);
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_MQ:
+		/* MBX work queue */
+		if (phba->sli4_hba.mbx_wq &&
+		    phba->sli4_hba.mbx_wq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.mbx_wq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.mbx_wq;
+			goto pass_check;
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_WQ:
+		/* ELS work queue */
+		if (phba->sli4_hba.els_wq &&
+		    phba->sli4_hba.els_wq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.els_wq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.els_wq;
+			goto pass_check;
+		}
+		/* FCP work queue */
+		if (phba->sli4_hba.fcp_wq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+				if (!phba->sli4_hba.fcp_wq[qidx])
+					continue;
+				if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+				    queid) {
+					/* Sanity check */
+					rc = lpfc_idiag_que_param_check(
+						phba->sli4_hba.fcp_wq[qidx],
+						index, count);
+					if (rc)
+						goto error_out;
+					idiag.ptr_private =
+						phba->sli4_hba.fcp_wq[qidx];
+					goto pass_check;
+				}
+			}
+		}
+		goto error_out;
+		break;
+	case LPFC_IDIAG_RQ:
+		/* HDR queue */
+		if (phba->sli4_hba.hdr_rq &&
+		    phba->sli4_hba.hdr_rq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.hdr_rq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.hdr_rq;
+			goto pass_check;
+		}
+		/* DAT queue */
+		if (phba->sli4_hba.dat_rq &&
+		    phba->sli4_hba.dat_rq->queue_id == queid) {
+			/* Sanity check */
+			rc = lpfc_idiag_que_param_check(
+					phba->sli4_hba.dat_rq, index, count);
+			if (rc)
+				goto error_out;
+			idiag.ptr_private = phba->sli4_hba.dat_rq;
+			goto pass_check;
+		}
+		goto error_out;
+		break;
+	default:
+		goto error_out;
+		break;
+	}
+
+pass_check:
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
+		if (count == LPFC_QUE_ACC_BROWSE)
+			idiag.offset.last_rd = index;
+	}
+
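+	/*
+	 * Write-class opcodes: QUEACC_WR overwrites the selected word with
+	 * the given value, QUEACC_ST sets (ORs in) the bits of the value,
+	 * and QUEACC_CL clears those bits.
+	 */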
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL) {
+		/* Additional sanity checks on write operation */
+		pque = (struct lpfc_queue *)idiag.ptr_private;
+		if (offset > pque->entry_size/sizeof(uint32_t) - 1)
+			goto error_out;
+		pentry = pque->qe[index].address;
+		pentry += offset;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR)
+			*pentry = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_ST)
+			*pentry |= value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_CL)
+			*pentry &= ~value;
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_drbacc_read_reg - idiag debugfs read a doorbell register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The length of data already copied into @pbuffer.
+ * @drbregid: The id of the doorbell register to read.
+ *
+ * Description:
+ * This routine reads a doorbell register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_drbacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+			   int len, uint32_t drbregid)
+{
+
+	if (!pbuffer)
+		return 0;
+
+	switch (drbregid) {
+	case LPFC_DRB_EQCQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"EQCQ-DRB-REG: 0x%08x\n",
+				readl(phba->sli4_hba.EQCQDBregaddr));
+		break;
+	case LPFC_DRB_MQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"MQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.MQDBregaddr));
+		break;
+	case LPFC_DRB_WQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"WQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.WQDBregaddr));
+		break;
+	case LPFC_DRB_RQ:
+		len += snprintf(pbuffer+len, LPFC_DRB_ACC_BUF_SIZE-len,
+				"RQ-DRB-REG:   0x%08x\n",
+				readl(phba->sli4_hba.RQDBregaddr));
+		break;
+	default:
+		break;
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_drbacc_read - idiag debugfs read port doorbell
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba device doorbell register according
+ * to the idiag command, and copies to user @buf. Depending on the doorbell
+ * register read command setup, it does either a single doorbell register
+ * read or dump all doorbell registers.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t drb_reg_id, i;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_DRB_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
+		drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+	else
+		return 0;
+
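+	/* Doorbell register ids are 1-based; ACC_ALL dumps ids 1..LPFC_DRB_MAX */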
+	if (drb_reg_id == LPFC_DRB_ACC_ALL)
+		for (i = 1; i <= LPFC_DRB_MAX; i++)
+			len = lpfc_idiag_drbacc_read_reg(phba,
+							 pbuffer, len, i);
+	else
+		len = lpfc_idiag_drbacc_read_reg(phba,
+						 pbuffer, len, drb_reg_id);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_drbacc_write - Syntax check and set up idiag drbacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a port doorbell register read (dump) or write
+ * (set) command accordingly. For a doorbell register read command, it sets
+ * up the command in the idiag command struct for the following debugfs read
+ * operation. For a port doorbell register write operation, it executes the
+ * write operation on the port doorbell register accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t drb_reg_id, value, reg_val = 0;
+	void __iomem *drb_reg;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+	value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+		if (rc != LPFC_DRB_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (drb_reg_id > LPFC_DRB_MAX)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD) {
+		if (rc != LPFC_DRB_ACC_RD_CMD_ARG)
+			goto error_out;
+		if ((drb_reg_id > LPFC_DRB_MAX) &&
+		    (drb_reg_id != LPFC_DRB_ACC_ALL))
+			goto error_out;
+	} else
+		goto error_out;
+
+	/* Perform the write access operation */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+		switch (drb_reg_id) {
+		case LPFC_DRB_EQCQ:
+			drb_reg = phba->sli4_hba.EQCQDBregaddr;
+			break;
+		case LPFC_DRB_MQ:
+			drb_reg = phba->sli4_hba.MQDBregaddr;
+			break;
+		case LPFC_DRB_WQ:
+			drb_reg = phba->sli4_hba.WQDBregaddr;
+			break;
+		case LPFC_DRB_RQ:
+			drb_reg = phba->sli4_hba.RQDBregaddr;
+			break;
+		default:
+			goto error_out;
+		}
+
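+		/* DRBACC_WR overwrites the register, _ST sets bits, _CL clears bits */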
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR)
+			reg_val = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST) {
+			reg_val = readl(drb_reg);
+			reg_val |= value;
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_CL) {
+			reg_val = readl(drb_reg);
+			reg_val &= ~value;
+		}
+		writel(reg_val, drb_reg);
+		readl(drb_reg); /* flush */
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The length of data already copied into @pbuffer.
+ * @ctlregid: The id of the control register to read.
+ *
+ * Description:
+ * This routine reads a control register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+			   int len, uint32_t ctlregid)
+{
+
+	if (!pbuffer)
+		return 0;
+
+	switch (ctlregid) {
+	case LPFC_CTL_PORT_SEM:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port SemReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_SEM_OFFSET));
+		break;
+	case LPFC_CTL_PORT_STA:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port StaReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_STA_OFFSET));
+		break;
+	case LPFC_CTL_PORT_CTL:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port CtlReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_CTL_OFFSET));
+		break;
+	case LPFC_CTL_PORT_ER1:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port Er1Reg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_ER1_OFFSET));
+		break;
+	case LPFC_CTL_PORT_ER2:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"Port Er2Reg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PORT_ER2_OFFSET));
+		break;
+	case LPFC_CTL_PDEV_CTL:
+		len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+				"PDev CtlReg:   0x%08x\n",
+				readl(phba->sli4_hba.conf_regs_memmap_p +
+				      LPFC_CTL_PDEV_CTL_OFFSET));
+		break;
+	default:
+		break;
+	}
+	return len;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba port and device registers according
+ * to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t ctl_reg_id, i;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
+		ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+	else
+		return 0;
+
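+	/* Control register ids are 1-based; ACC_ALL dumps ids 1..LPFC_CTL_MAX */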
+	if (ctl_reg_id == LPFC_CTL_ACC_ALL)
+		for (i = 1; i <= LPFC_CTL_MAX; i++)
+			len = lpfc_idiag_ctlacc_read_reg(phba,
+							 pbuffer, len, i);
+	else
+		len = lpfc_idiag_ctlacc_read_reg(phba,
+						 pbuffer, len, ctl_reg_id);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a port or device control register read (dump)
+ * or write (set) command accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	uint32_t ctl_reg_id, value, reg_val = 0;
+	void __iomem *ctl_reg;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+	value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+		if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
+			goto error_out;
+		if (ctl_reg_id > LPFC_CTL_MAX)
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
+		if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
+			goto error_out;
+		if ((ctl_reg_id > LPFC_CTL_MAX) &&
+		    (ctl_reg_id != LPFC_CTL_ACC_ALL))
+			goto error_out;
+	} else
+		goto error_out;
+
+	/* Perform the write access operation */
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+	    idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+		switch (ctl_reg_id) {
+		case LPFC_CTL_PORT_SEM:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_SEM_OFFSET;
+			break;
+		case LPFC_CTL_PORT_STA:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_STA_OFFSET;
+			break;
+		case LPFC_CTL_PORT_CTL:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_CTL_OFFSET;
+			break;
+		case LPFC_CTL_PORT_ER1:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_ER1_OFFSET;
+			break;
+		case LPFC_CTL_PORT_ER2:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PORT_ER2_OFFSET;
+			break;
+		case LPFC_CTL_PDEV_CTL:
+			ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+					LPFC_CTL_PDEV_CTL_OFFSET;
+			break;
+		default:
+			goto error_out;
+		}
+
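+		/* CTLACC_WR overwrites the register, _ST sets bits, _CL clears bits */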
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
+			reg_val = value;
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
+			reg_val = readl(ctl_reg);
+			reg_val |= value;
+		}
+		if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+			reg_val = readl(ctl_reg);
+			reg_val &= ~value;
+		}
+		writel(reg_val, ctl_reg);
+		readl(ctl_reg); /* flush */
+	}
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
+ * @phba: Pointer to HBA context object.
+ * @pbuffer: Pointer to data buffer.
+ *
+ * Description:
+ * This routine gets the driver mailbox access debugfs setup information.
+ *
+ * Returns:
+ * This function returns the length of the setup information copied into
+ * @pbuffer.
+ **/
+static int
+lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
+{
+	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+	int len = 0;
+
+	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_dump_map: 0x%08x\n", mbx_dump_map);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_dump_cnt: %04d\n", mbx_dump_cnt);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_word_cnt: %04d\n", mbx_word_cnt);
+	len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+			"mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba driver mailbox access debugfs setup
+ * information.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *pbuffer;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+
+	if (*ppos)
+		return 0;
+
+	if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
+	    (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
+		return 0;
+
+	len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a driver mailbox command (dump) and sets up
+ * the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	/* Sanity check on command line arguments */
+	mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
+		if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
+			goto error_out;
+		if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
+		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
+			goto error_out;
+		if (mbx_word_cnt > sizeof(MAILBOX_t))
+			goto error_out;
+	} else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
+		if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
+			goto error_out;
+		if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
+		    (mbx_dump_map != LPFC_MBX_DMP_ALL))
+			goto error_out;
+		if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
+			goto error_out;
+		if (mbx_mbox_cmd != 0x9b)
+			goto error_out;
+	} else
+		goto error_out;
+
+	if (mbx_word_cnt == 0)
+		goto error_out;
+	if (rc != LPFC_MBX_DMP_ARG)
+		goto error_out;
+	if (mbx_mbox_cmd & ~0xff)
+		goto error_out;
+
+	/* condition for stop mailbox dump */
+	if (mbx_dump_cnt == 0)
+		goto reset_out;
+
+	return nbytes;
+
+reset_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return nbytes;
+
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_avail_get - get the available extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the available extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	uint16_t ext_cnt, ext_size;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nAvailable Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available VPI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available VFI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available RPI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tPort Available XRI extents: ");
+	lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+				       &ext_cnt, &ext_size);
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_alloc_get - get the allocated extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the allocated extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	uint16_t ext_cnt, ext_size;
+	int rc;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nAllocated Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated VPI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated VFI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated RPI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tHost Allocated XRI extents: ");
+	rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
+					    &ext_cnt, &ext_size);
+	if (!rc)
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"Port %d Extent %3d, Size %3d\n",
+				phba->brd_no, ext_cnt, ext_size);
+	else
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"N/A\n");
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_drivr_get - get driver extent information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the driver extent information.
+ *
+ * Returns:
+ * overall length of the data read into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+	struct lpfc_rsrc_blks *rsrc_blks;
+	int index;
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\nDriver Extents Information:\n");
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tVPI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tVFI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tRPI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+			"\tXRI extents:\n");
+	index = 0;
+	list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
+			    list) {
+		len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+				"\t\tBlock %3d: Start %4d, Count %4d\n",
+				index, rsrc_blks->rsrc_start,
+				rsrc_blks->rsrc_size);
+		index++;
+	}
+
+	return len;
+}
+
+/**
+ * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for extent information access commands and sets
+ * up the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to the
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
+			size_t nbytes, loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	uint32_t ext_map;
+	int rc;
+
+	/* This is a user write operation */
+	debug->op = LPFC_IDIAG_OP_WR;
+
+	rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+	if (rc < 0)
+		return rc;
+
+	ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+		goto error_out;
+	if (rc != LPFC_EXT_ACC_CMD_ARG)
+		goto error_out;
+	if (!(ext_map & LPFC_EXT_ACC_ALL))
+		goto error_out;
+
+	return nbytes;
+error_out:
+	/* Clean out command structure on command error out */
+	memset(&idiag, 0, sizeof(idiag));
+	return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the proper extent information according to
+ * the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
+		       loff_t *ppos)
+{
+	struct lpfc_debug *debug = file->private_data;
+	struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+	char *pbuffer;
+	uint32_t ext_map;
+	int len = 0;
+
+	/* This is a user read operation */
+	debug->op = LPFC_IDIAG_OP_RD;
+
+	if (!debug->buffer)
+		debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
+	if (!debug->buffer)
+		return 0;
+	pbuffer = debug->buffer;
+	if (*ppos)
+		return 0;
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+		return 0;
+
+	ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+	if (ext_map & LPFC_EXT_ACC_AVAIL)
+		len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
+	if (ext_map & LPFC_EXT_ACC_ALLOC)
+		len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
+	if (ext_map & LPFC_EXT_ACC_DRIVR)
+		len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
+
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+#undef lpfc_debugfs_op_disc_trc
+static const struct file_operations lpfc_debugfs_op_disc_trc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_disc_trc_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nodelist
+static const struct file_operations lpfc_debugfs_op_nodelist = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_nodelist_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_hbqinfo
+static const struct file_operations lpfc_debugfs_op_hbqinfo = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_hbqinfo_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHBASlim
+static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpHBASlim_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpHostSlim
+static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpHostSlim_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_dumpData
+static const struct file_operations lpfc_debugfs_op_dumpData = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpData_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dumpDif
+static const struct file_operations lpfc_debugfs_op_dumpDif = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_dumpDif_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.write =	lpfc_debugfs_dumpDataDif_write,
+	.release =      lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dif_err
+static const struct file_operations lpfc_debugfs_op_dif_err = {
+	.owner =	THIS_MODULE,
+	.open =		simple_open,
+	.llseek =	lpfc_debugfs_lseek,
+	.read =		lpfc_debugfs_dif_err_read,
+	.write =	lpfc_debugfs_dif_err_write,
+	.release =	lpfc_debugfs_dif_err_release,
+};
+
+#undef lpfc_debugfs_op_slow_ring_trc
+static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_debugfs_slow_ring_trc_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_debugfs_read,
+	.release =      lpfc_debugfs_release,
+};
+
+static struct dentry *lpfc_debugfs_root = NULL;
+static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_pcicfg_read,
+	.write =        lpfc_idiag_pcicfg_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_barAcc
+static const struct file_operations lpfc_idiag_op_barAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_baracc_read,
+	.write =        lpfc_idiag_baracc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.read =         lpfc_idiag_queinfo_read,
+	.release =      lpfc_idiag_release,
+};
+
+#undef lpfc_idiag_op_queAcc
+static const struct file_operations lpfc_idiag_op_queAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_queacc_read,
+	.write =        lpfc_idiag_queacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_drbAcc
+static const struct file_operations lpfc_idiag_op_drbAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_drbacc_read,
+	.write =        lpfc_idiag_drbacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_ctlAcc
+static const struct file_operations lpfc_idiag_op_ctlAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_ctlacc_read,
+	.write =        lpfc_idiag_ctlacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_mbxAcc
+static const struct file_operations lpfc_idiag_op_mbxAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_mbxacc_read,
+	.write =        lpfc_idiag_mbxacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_extAcc
+static const struct file_operations lpfc_idiag_op_extAcc = {
+	.owner =        THIS_MODULE,
+	.open =         lpfc_idiag_open,
+	.llseek =       lpfc_debugfs_lseek,
+	.read =         lpfc_idiag_extacc_read,
+	.write =        lpfc_idiag_extacc_write,
+	.release =      lpfc_idiag_cmd_release,
+};
+
+#endif
+
+/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a BSG pass-through non-embedded mailbox command with an
+ * external buffer.
+ **/
+void
+lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+				enum mbox_type mbox_tp, enum dma_type dma_tp,
+				enum sta_type sta_tp,
+				struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
+	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+	int len = 0;
+	uint32_t do_dump = 0;
+	uint32_t *pword;
+	uint32_t i;
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
+		return;
+
+	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
+	    (*mbx_dump_cnt == 0) ||
+	    (*mbx_word_cnt == 0))
+		return;
+
+	if (*mbx_mbox_cmd != 0x9B)
+		return;
+
+	if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
+			do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
+			printk(KERN_ERR "\nRead mbox command (x%x), "
+			       "nemb:0x%x, extbuf_cnt:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
+			do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
+			printk(KERN_ERR "\nRead mbox buffer (x%x), "
+			       "nemb:0x%x, extbuf_seq:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
+			do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
+			printk(KERN_ERR "\nWrite mbox command (x%x), "
+			       "nemb:0x%x, extbuf_cnt:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+	if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
+		if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
+			do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
+			printk(KERN_ERR "\nWrite mbox buffer (x%x), "
+			       "nemb:0x%x, extbuf_seq:%d:\n",
+			       sta_tp, nemb_tp, ext_buf);
+		}
+	}
+
+	/* dump buffer content */
+	if (do_dump) {
+		pword = (uint32_t *)dmabuf->virt;
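+		/* Eight 32-bit words per printed line, prefixed by word index */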
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					printk(KERN_ERR "%s\n", line_buf);
+				len = 0;
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+					"%08x ", (uint32_t)*pword);
+			pword++;
+		}
+		if ((i - 1) % 8)
+			printk(KERN_ERR "%s\n", line_buf);
+		(*mbx_dump_cnt)--;
+	}
+
+	/* Clean out command structure on reaching dump count */
+	if (*mbx_dump_cnt == 0)
+		memset(&idiag, 0, sizeof(idiag));
+	return;
+#endif
+}
+
+/* lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a pass-through non-embedded mailbox command from the
+ * issue mailbox command path.
+ **/
+void
+lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
+	char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+	int len = 0;
+	uint32_t *pword;
+	uint8_t *pbyte;
+	uint32_t i, j;
+
+	if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
+		return;
+
+	mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+	mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+	mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+	mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+	if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
+	    (*mbx_dump_cnt == 0) ||
+	    (*mbx_word_cnt == 0))
+		return;
+
+	if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
+	    (*mbx_mbox_cmd != pmbox->mbxCommand))
+		return;
+
+	/* dump buffer content */
+	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
+		printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
+		       pmbox->mbxCommand);
+		pword = (uint32_t *)pmbox;
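+		/* Eight 32-bit words per printed line, prefixed by word index */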
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					printk(KERN_ERR "%s\n", line_buf);
+				len = 0;
+				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+					"%08x ",
+					((uint32_t)*pword) & 0xffffffff);
+			pword++;
+		}
+		if ((i - 1) % 8)
+			printk(KERN_ERR "%s\n", line_buf);
+		printk(KERN_ERR "\n");
+	}
+	if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
+		printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
+		       pmbox->mbxCommand);
+		pbyte = (uint8_t *)pmbox;
+		for (i = 0; i < *mbx_word_cnt; i++) {
+			if (!(i % 8)) {
+				if (i != 0)
+					printk(KERN_ERR "%s\n", line_buf);
+				len = 0;
+				memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%03d: ", i);
+			}
+			for (j = 0; j < 4; j++) {
+				len += snprintf(line_buf+len,
+						LPFC_MBX_ACC_LBUF_SZ-len,
+						"%02x",
+						((uint8_t)*pbyte) & 0xff);
+				pbyte++;
+			}
+			len += snprintf(line_buf+len,
+					LPFC_MBX_ACC_LBUF_SZ-len, " ");
+		}
+		if ((i - 1) % 8)
+			printk(KERN_ERR "%s\n", line_buf);
+		printk(KERN_ERR "\n");
+	}
+	(*mbx_dump_cnt)--;
+
+	/* Clean out command structure on reaching dump count */
+	if (*mbx_dump_cnt == 0)
+		memset(&idiag, 0, sizeof(idiag));
+	return;
+#endif
+}
+
+/**
+ * lpfc_debugfs_initialize - Initialize debugfs for a vport
+ * @vport: The vport pointer to initialize.
+ *
+ * Description:
+ * When debugfs is configured this routine sets up the lpfc debugfs file
+ * system. If not already created, this routine will create the lpfc
+ * directory, the fnX directory (for this HBA PCI function), and the vportX
+ * directory for this vport. It will also create each file used to access
+ * lpfc specific debugfs information.
+ **/
+inline void
+lpfc_debugfs_initialize(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_hba   *phba = vport->phba;
+	char name[64];
+	uint32_t num, i;
+
+	if (!lpfc_debugfs_enable)
+		return;
+
+	/* Setup lpfc root directory */
+	if (!lpfc_debugfs_root) {
+		lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
+		atomic_set(&lpfc_debugfs_hba_count, 0);
+		if (!lpfc_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0408 Cannot create debugfs root\n");
+			goto debug_failed;
+		}
+	}
+	if (!lpfc_debugfs_start_time)
+		lpfc_debugfs_start_time = jiffies;
+
+	/* Setup funcX directory for specific HBA PCI function */
+	snprintf(name, sizeof(name), "fn%d", phba->brd_no);
+	if (!phba->hba_debugfs_root) {
+		phba->hba_debugfs_root =
+			debugfs_create_dir(name, lpfc_debugfs_root);
+		if (!phba->hba_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0412 Cannot create debugfs hba\n");
+			goto debug_failed;
+		}
+		atomic_inc(&lpfc_debugfs_hba_count);
+		atomic_set(&phba->debugfs_vport_count, 0);
+
+		/* Setup hbqinfo */
+		snprintf(name, sizeof(name), "hbqinfo");
+		phba->debug_hbqinfo =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_hbqinfo);
+		if (!phba->debug_hbqinfo) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0411 Cannot create debugfs hbqinfo\n");
+			goto debug_failed;
+		}
+
+		/* Setup dumpHBASlim */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			snprintf(name, sizeof(name), "dumpHBASlim");
+			phba->debug_dumpHBASlim =
+				debugfs_create_file(name,
+					S_IFREG|S_IRUGO|S_IWUSR,
+					phba->hba_debugfs_root,
+					phba, &lpfc_debugfs_op_dumpHBASlim);
+			if (!phba->debug_dumpHBASlim) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0413 Cannot create debugfs "
+						"dumpHBASlim\n");
+				goto debug_failed;
+			}
+		} else
+			phba->debug_dumpHBASlim = NULL;
+
+		/* Setup dumpHostSlim */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			snprintf(name, sizeof(name), "dumpHostSlim");
+			phba->debug_dumpHostSlim =
+				debugfs_create_file(name,
+					S_IFREG|S_IRUGO|S_IWUSR,
+					phba->hba_debugfs_root,
+					phba, &lpfc_debugfs_op_dumpHostSlim);
+			if (!phba->debug_dumpHostSlim) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0414 Cannot create debugfs "
+						 "dumpHostSlim\n");
+				goto debug_failed;
+			}
+		} else
+			phba->debug_dumpHostSlim = NULL;
+
+		/* Setup dumpData */
+		snprintf(name, sizeof(name), "dumpData");
+		phba->debug_dumpData =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpData);
+		if (!phba->debug_dumpData) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0800 Cannot create debugfs dumpData\n");
+			goto debug_failed;
+		}
+
+		/* Setup dumpDif */
+		snprintf(name, sizeof(name), "dumpDif");
+		phba->debug_dumpDif =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_dumpDif);
+		if (!phba->debug_dumpDif) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0801 Cannot create debugfs dumpDif\n");
+			goto debug_failed;
+		}
+
+		/* Setup DIF Error Injections */
+		snprintf(name, sizeof(name), "InjErrLBA");
+		phba->debug_InjErrLBA =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrLBA) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0807 Cannot create debugfs InjErrLBA\n");
+			goto debug_failed;
+		}
+		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+		snprintf(name, sizeof(name), "InjErrNPortID");
+		phba->debug_InjErrNPortID =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrNPortID) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0809 Cannot create debugfs InjErrNPortID\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "InjErrWWPN");
+		phba->debug_InjErrWWPN =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_InjErrWWPN) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0810 Cannot create debugfs InjErrWWPN\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeGuardInjErr");
+		phba->debug_writeGuard =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeGuard) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0802 Cannot create debugfs writeGuard\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeAppInjErr");
+		phba->debug_writeApp =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeApp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0803 Cannot create debugfs writeApp\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "writeRefInjErr");
+		phba->debug_writeRef =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_writeRef) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0804 Cannot create debugfs writeRef\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readGuardInjErr");
+		phba->debug_readGuard =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readGuard) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0808 Cannot create debugfs readGuard\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readAppInjErr");
+		phba->debug_readApp =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readApp) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0805 Cannot create debugfs readApp\n");
+			goto debug_failed;
+		}
+
+		snprintf(name, sizeof(name), "readRefInjErr");
+		phba->debug_readRef =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+			phba->hba_debugfs_root,
+			phba, &lpfc_debugfs_op_dif_err);
+		if (!phba->debug_readRef) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"0806 Cannot create debugfs readRef\n");
+			goto debug_failed;
+		}
+
+		/* Setup slow ring trace */
+		if (lpfc_debugfs_max_slow_ring_trc) {
+			num = lpfc_debugfs_max_slow_ring_trc - 1;
+			if (num & lpfc_debugfs_max_slow_ring_trc) {
+				/* Change to be a power of 2 */
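+				/* e.g. a request of 1000 entries is
+				 * rounded down to 512 here, the largest
+				 * power of 2 not exceeding it */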
+				num = lpfc_debugfs_max_slow_ring_trc;
+				i = 0;
+				while (num > 1) {
+					num = num >> 1;
+					i++;
+				}
+				lpfc_debugfs_max_slow_ring_trc = (1 << i);
+				printk(KERN_ERR
+				       "lpfc_debugfs_max_slow_ring_trc changed "
+				       "to %d\n", lpfc_debugfs_max_slow_ring_trc);
+			}
+		}
+
+		snprintf(name, sizeof(name), "slow_ring_trace");
+		phba->debug_slow_ring_trc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 phba->hba_debugfs_root,
+				 phba, &lpfc_debugfs_op_slow_ring_trc);
+		if (!phba->debug_slow_ring_trc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0415 Cannot create debugfs "
+					 "slow_ring_trace\n");
+			goto debug_failed;
+		}
+		if (!phba->slow_ring_trc) {
+			phba->slow_ring_trc = kmalloc(
+				(sizeof(struct lpfc_debugfs_trc) *
+				lpfc_debugfs_max_slow_ring_trc),
+				GFP_KERNEL);
+			if (!phba->slow_ring_trc) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						 "0416 Cannot create debugfs "
+						 "slow_ring buffer\n");
+				goto debug_failed;
+			}
+			atomic_set(&phba->slow_ring_trc_cnt, 0);
+			memset(phba->slow_ring_trc, 0,
+				(sizeof(struct lpfc_debugfs_trc) *
+				lpfc_debugfs_max_slow_ring_trc));
+		}
+	}
+
+	snprintf(name, sizeof(name), "vport%d", vport->vpi);
+	if (!vport->vport_debugfs_root) {
+		vport->vport_debugfs_root =
+			debugfs_create_dir(name, phba->hba_debugfs_root);
+		if (!vport->vport_debugfs_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "0417 Can't create debugfs\n");
+			goto debug_failed;
+		}
+		atomic_inc(&phba->debugfs_vport_count);
+	}
+
+	if (lpfc_debugfs_max_disc_trc) {
+		num = lpfc_debugfs_max_disc_trc - 1;
+		if (num & lpfc_debugfs_max_disc_trc) {
+			/* Change to be a power of 2 */
+			num = lpfc_debugfs_max_disc_trc;
+			i = 0;
+			while (num > 1) {
+				num = num >> 1;
+				i++;
+			}
+			lpfc_debugfs_max_disc_trc = (1 << i);
+			printk(KERN_ERR
+			       "lpfc_debugfs_max_disc_trc changed to %d\n",
+			       lpfc_debugfs_max_disc_trc);
+		}
+	}
+
+	vport->disc_trc = kzalloc(
+		(sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc),
+		GFP_KERNEL);
+
+	if (!vport->disc_trc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0418 Cannot create debugfs disc trace "
+				 "buffer\n");
+		goto debug_failed;
+	}
+	atomic_set(&vport->disc_trc_cnt, 0);
+
+	snprintf(name, sizeof(name), "discovery_trace");
+	vport->debug_disc_trc =
+		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 vport->vport_debugfs_root,
+				 vport, &lpfc_debugfs_op_disc_trc);
+	if (!vport->debug_disc_trc) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0419 Cannot create debugfs "
+				 "discovery_trace\n");
+		goto debug_failed;
+	}
+	snprintf(name, sizeof(name), "nodelist");
+	vport->debug_nodelist =
+		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				 vport->vport_debugfs_root,
+				 vport, &lpfc_debugfs_op_nodelist);
+	if (!vport->debug_nodelist) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "2985 Can't create debugfs nodelist\n");
+		goto debug_failed;
+	}
+
+	/*
+	 * iDiag debugfs root entry points for SLI4 device only
+	 */
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		goto debug_failed;
+
+	snprintf(name, sizeof(name), "iDiag");
+	if (!phba->idiag_root) {
+		phba->idiag_root =
+			debugfs_create_dir(name, phba->hba_debugfs_root);
+		if (!phba->idiag_root) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2922 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		/* Initialize iDiag data structure */
+		memset(&idiag, 0, sizeof(idiag));
+	}
+
+	/* iDiag read PCI config space */
+	snprintf(name, sizeof(name), "pciCfg");
+	if (!phba->idiag_pci_cfg) {
+		phba->idiag_pci_cfg =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+		if (!phba->idiag_pci_cfg) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2923 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		idiag.offset.last_rd = 0;
+	}
+
+	/* iDiag PCI BAR access */
+	snprintf(name, sizeof(name), "barAcc");
+	if (!phba->idiag_bar_acc) {
+		phba->idiag_bar_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
+		if (!phba->idiag_bar_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					"3056 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+		idiag.offset.last_rd = 0;
+	}
+
+	/* iDiag get PCI function queue information */
+	snprintf(name, sizeof(name), "queInfo");
+	if (!phba->idiag_que_info) {
+		phba->idiag_que_info =
+			debugfs_create_file(name, S_IFREG|S_IRUGO,
+			phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+		if (!phba->idiag_que_info) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2924 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function queue */
+	snprintf(name, sizeof(name), "queAcc");
+	if (!phba->idiag_que_acc) {
+		phba->idiag_que_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
+		if (!phba->idiag_que_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2926 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function doorbell registers */
+	snprintf(name, sizeof(name), "drbAcc");
+	if (!phba->idiag_drb_acc) {
+		phba->idiag_drb_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
+		if (!phba->idiag_drb_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2927 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access PCI function control registers */
+	snprintf(name, sizeof(name), "ctlAcc");
+	if (!phba->idiag_ctl_acc) {
+		phba->idiag_ctl_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
+		if (!phba->idiag_ctl_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					 "2981 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag access mbox commands */
+	snprintf(name, sizeof(name), "mbxAcc");
+	if (!phba->idiag_mbx_acc) {
+		phba->idiag_mbx_acc =
+			debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+				phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
+		if (!phba->idiag_mbx_acc) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+					"2980 Can't create idiag debugfs\n");
+			goto debug_failed;
+		}
+	}
+
+	/* iDiag extents access commands */
+	if (phba->sli4_hba.extents_in_use) {
+		snprintf(name, sizeof(name), "extAcc");
+		if (!phba->idiag_ext_acc) {
+			phba->idiag_ext_acc =
+				debugfs_create_file(name,
+						    S_IFREG|S_IRUGO|S_IWUSR,
+						    phba->idiag_root, phba,
+						    &lpfc_idiag_op_extAcc);
+			if (!phba->idiag_ext_acc) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+						"2986 Can't create "
+						"idiag debugfs\n");
+				goto debug_failed;
+			}
+		}
+	}
+
+debug_failed:
+	return;
+#endif
+}
+
+/**
+ * lpfc_debugfs_terminate -  Tear down debugfs infrastructure for this vport
+ * @vport: The vport pointer to remove from debugfs.
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system elements
+ * that are specific to this vport. It also checks to see if there are any
+ * users left for the debugfs directories associated with the HBA and driver. If
+ * this is the last user of the HBA directory or driver directory then it will
+ * remove those from the debugfs infrastructure as well.
+ **/
+inline void
+lpfc_debugfs_terminate(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	struct lpfc_hba   *phba = vport->phba;
+
+	if (vport->disc_trc) {
+		kfree(vport->disc_trc);
+		vport->disc_trc = NULL;
+	}
+	if (vport->debug_disc_trc) {
+		debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+		vport->debug_disc_trc = NULL;
+	}
+	if (vport->debug_nodelist) {
+		debugfs_remove(vport->debug_nodelist); /* nodelist */
+		vport->debug_nodelist = NULL;
+	}
+	if (vport->vport_debugfs_root) {
+		debugfs_remove(vport->vport_debugfs_root); /* vportX */
+		vport->vport_debugfs_root = NULL;
+		atomic_dec(&phba->debugfs_vport_count);
+	}
+	if (atomic_read(&phba->debugfs_vport_count) == 0) {
+
+		if (phba->debug_hbqinfo) {
+			debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
+			phba->debug_hbqinfo = NULL;
+		}
+		if (phba->debug_dumpHBASlim) {
+			debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
+			phba->debug_dumpHBASlim = NULL;
+		}
+		if (phba->debug_dumpHostSlim) {
+			debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
+			phba->debug_dumpHostSlim = NULL;
+		}
+		if (phba->debug_dumpData) {
+			debugfs_remove(phba->debug_dumpData); /* dumpData */
+			phba->debug_dumpData = NULL;
+		}
+
+		if (phba->debug_dumpDif) {
+			debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+			phba->debug_dumpDif = NULL;
+		}
+		if (phba->debug_InjErrLBA) {
+			debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
+			phba->debug_InjErrLBA = NULL;
+		}
+		if (phba->debug_InjErrNPortID) {	 /* InjErrNPortID */
+			debugfs_remove(phba->debug_InjErrNPortID);
+			phba->debug_InjErrNPortID = NULL;
+		}
+		if (phba->debug_InjErrWWPN) {
+			debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+			phba->debug_InjErrWWPN = NULL;
+		}
+		if (phba->debug_writeGuard) {
+			debugfs_remove(phba->debug_writeGuard); /* writeGuard */
+			phba->debug_writeGuard = NULL;
+		}
+		if (phba->debug_writeApp) {
+			debugfs_remove(phba->debug_writeApp); /* writeApp */
+			phba->debug_writeApp = NULL;
+		}
+		if (phba->debug_writeRef) {
+			debugfs_remove(phba->debug_writeRef); /* writeRef */
+			phba->debug_writeRef = NULL;
+		}
+		if (phba->debug_readGuard) {
+			debugfs_remove(phba->debug_readGuard); /* readGuard */
+			phba->debug_readGuard = NULL;
+		}
+		if (phba->debug_readApp) {
+			debugfs_remove(phba->debug_readApp); /* readApp */
+			phba->debug_readApp = NULL;
+		}
+		if (phba->debug_readRef) {
+			debugfs_remove(phba->debug_readRef); /* readRef */
+			phba->debug_readRef = NULL;
+		}
+
+		if (phba->slow_ring_trc) {
+			kfree(phba->slow_ring_trc);
+			phba->slow_ring_trc = NULL;
+		}
+		if (phba->debug_slow_ring_trc) {
+			/* slow_ring_trace */
+			debugfs_remove(phba->debug_slow_ring_trc);
+			phba->debug_slow_ring_trc = NULL;
+		}
+
+		/*
+		 * iDiag release
+		 */
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			if (phba->idiag_ext_acc) {
+				/* iDiag extAcc */
+				debugfs_remove(phba->idiag_ext_acc);
+				phba->idiag_ext_acc = NULL;
+			}
+			if (phba->idiag_mbx_acc) {
+				/* iDiag mbxAcc */
+				debugfs_remove(phba->idiag_mbx_acc);
+				phba->idiag_mbx_acc = NULL;
+			}
+			if (phba->idiag_ctl_acc) {
+				/* iDiag ctlAcc */
+				debugfs_remove(phba->idiag_ctl_acc);
+				phba->idiag_ctl_acc = NULL;
+			}
+			if (phba->idiag_drb_acc) {
+				/* iDiag drbAcc */
+				debugfs_remove(phba->idiag_drb_acc);
+				phba->idiag_drb_acc = NULL;
+			}
+			if (phba->idiag_que_acc) {
+				/* iDiag queAcc */
+				debugfs_remove(phba->idiag_que_acc);
+				phba->idiag_que_acc = NULL;
+			}
+			if (phba->idiag_que_info) {
+				/* iDiag queInfo */
+				debugfs_remove(phba->idiag_que_info);
+				phba->idiag_que_info = NULL;
+			}
+			if (phba->idiag_bar_acc) {
+				/* iDiag barAcc */
+				debugfs_remove(phba->idiag_bar_acc);
+				phba->idiag_bar_acc = NULL;
+			}
+			if (phba->idiag_pci_cfg) {
+				/* iDiag pciCfg */
+				debugfs_remove(phba->idiag_pci_cfg);
+				phba->idiag_pci_cfg = NULL;
+			}
+
+			/* Finally remove the iDiag debugfs root */
+			if (phba->idiag_root) {
+				/* iDiag root */
+				debugfs_remove(phba->idiag_root);
+				phba->idiag_root = NULL;
+			}
+		}
+
+		if (phba->hba_debugfs_root) {
+			debugfs_remove(phba->hba_debugfs_root); /* fnX */
+			phba->hba_debugfs_root = NULL;
+			atomic_dec(&lpfc_debugfs_hba_count);
+		}
+
+		if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+			debugfs_remove(lpfc_debugfs_root); /* lpfc */
+			lpfc_debugfs_root = NULL;
+		}
+	}
+#endif
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 0000000..f83bd94
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,269 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2007-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#ifndef _H_LPFC_DEBUG_FS
+#define _H_LPFC_DEBUG_FS
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/*
+ * For SLI4 iDiag debugfs diagnostics tool
+ */
+
+/* pciConf */
+#define LPFC_PCI_CFG_BROWSE 0xffff
+#define LPFC_PCI_CFG_RD_CMD_ARG 2
+#define LPFC_PCI_CFG_WR_CMD_ARG 3
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+#define IDIAG_PCICFG_WHERE_INDX 0
+#define IDIAG_PCICFG_COUNT_INDX 1
+#define IDIAG_PCICFG_VALUE_INDX 2
+
+/* barAcc */
+#define LPFC_PCI_BAR_BROWSE 0xffff
+#define LPFC_PCI_BAR_RD_CMD_ARG 3
+#define LPFC_PCI_BAR_WR_CMD_ARG 3
+
+#define LPFC_PCI_IF0_BAR0_SIZE (1024 *  16)
+#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
+#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
+#define LPFC_PCI_IF2_BAR0_SIZE (1024 *  32)
+
+#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
+#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
+
+#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
+#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
+#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
+#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
+
+#define IDIAG_BARACC_BAR_NUM_INDX 0
+#define IDIAG_BARACC_OFF_SET_INDX 1
+#define IDIAG_BARACC_ACC_MOD_INDX 2
+#define IDIAG_BARACC_REG_VAL_INDX 2
+#define IDIAG_BARACC_BAR_SZE_INDX 3
+
+#define IDIAG_BARACC_BAR_0 0
+#define IDIAG_BARACC_BAR_1 1
+#define IDIAG_BARACC_BAR_2 2
+
+#define SINGLE_WORD 1
+
+/* queue info */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
+
+/* queue acc */
+#define LPFC_QUE_ACC_BROWSE 0xffff
+#define LPFC_QUE_ACC_RD_CMD_ARG 4
+#define LPFC_QUE_ACC_WR_CMD_ARG 6
+#define LPFC_QUE_ACC_BUF_SIZE 4096
+#define LPFC_QUE_ACC_SIZE (LPFC_QUE_ACC_BUF_SIZE/2)
+
+#define LPFC_IDIAG_EQ 1
+#define LPFC_IDIAG_CQ 2
+#define LPFC_IDIAG_MQ 3
+#define LPFC_IDIAG_WQ 4
+#define LPFC_IDIAG_RQ 5
+
+#define IDIAG_QUEACC_QUETP_INDX 0
+#define IDIAG_QUEACC_QUEID_INDX 1
+#define IDIAG_QUEACC_INDEX_INDX 2
+#define IDIAG_QUEACC_COUNT_INDX 3
+#define IDIAG_QUEACC_OFFST_INDX 4
+#define IDIAG_QUEACC_VALUE_INDX 5
+
+/* doorbell register acc */
+#define LPFC_DRB_ACC_ALL 0xffff
+#define LPFC_DRB_ACC_RD_CMD_ARG 1
+#define LPFC_DRB_ACC_WR_CMD_ARG 2
+#define LPFC_DRB_ACC_BUF_SIZE 256
+
+#define LPFC_DRB_EQCQ 1
+#define LPFC_DRB_MQ   2
+#define LPFC_DRB_WQ   3
+#define LPFC_DRB_RQ   4
+
+#define LPFC_DRB_MAX  4
+
+#define IDIAG_DRBACC_REGID_INDX 0
+#define IDIAG_DRBACC_VALUE_INDX 1
+
+/* control register acc */
+#define LPFC_CTL_ACC_ALL 0xffff
+#define LPFC_CTL_ACC_RD_CMD_ARG 1
+#define LPFC_CTL_ACC_WR_CMD_ARG 2
+#define LPFC_CTL_ACC_BUF_SIZE 256
+
+#define LPFC_CTL_PORT_SEM  1
+#define LPFC_CTL_PORT_STA  2
+#define LPFC_CTL_PORT_CTL  3
+#define LPFC_CTL_PORT_ER1  4
+#define LPFC_CTL_PORT_ER2  5
+#define LPFC_CTL_PDEV_CTL  6
+
+#define LPFC_CTL_MAX  6
+
+#define IDIAG_CTLACC_REGID_INDX 0
+#define IDIAG_CTLACC_VALUE_INDX 1
+
+/* mailbox access */
+#define LPFC_MBX_DMP_ARG 4
+
+#define LPFC_MBX_ACC_BUF_SIZE 512
+#define LPFC_MBX_ACC_LBUF_SZ 128
+
+#define LPFC_MBX_DMP_MBX_WORD 0x00000001
+#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
+#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
+
+#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
+#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
+#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
+#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
+#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
+			      LPFC_BSG_DMP_MBX_RD_BUF | \
+			      LPFC_BSG_DMP_MBX_WR_MBX | \
+			      LPFC_BSG_DMP_MBX_WR_BUF)
+
+#define LPFC_MBX_DMP_ALL 0xffff
+#define LPFC_MBX_ALL_CMD 0xff
+
+#define IDIAG_MBXACC_MBCMD_INDX 0
+#define IDIAG_MBXACC_DPMAP_INDX 1
+#define IDIAG_MBXACC_DPCNT_INDX 2
+#define IDIAG_MBXACC_WDCNT_INDX 3
+
+/* extents access */
+#define LPFC_EXT_ACC_CMD_ARG 1
+#define LPFC_EXT_ACC_BUF_SIZE 4096
+
+#define LPFC_EXT_ACC_AVAIL 0x1
+#define LPFC_EXT_ACC_ALLOC 0x2
+#define LPFC_EXT_ACC_DRIVR 0x4
+#define LPFC_EXT_ACC_ALL   (LPFC_EXT_ACC_DRIVR | \
+			    LPFC_EXT_ACC_AVAIL | \
+			    LPFC_EXT_ACC_ALLOC)
+
+#define IDIAG_EXTACC_EXMAP_INDX 0
+
+#define SIZE_U8  sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+	char *i_private;
+	char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+	char *buffer;
+	int  len;
+};
+
+struct lpfc_debugfs_trc {
+	char *fmt;
+	uint32_t data1;
+	uint32_t data2;
+	uint32_t data3;
+	uint32_t seq_cnt;
+	unsigned long jif;
+};
+
+struct lpfc_idiag_offset {
+	uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 8
+struct lpfc_idiag_cmd {
+	uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+
+#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
+#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
+#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
+#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
+
+#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
+#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
+#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
+#define LPFC_IDIAG_CMD_QUEACC_CL 0x00000014
+
+#define LPFC_IDIAG_CMD_DRBACC_RD 0x00000021
+#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
+#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
+#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
+
+#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
+#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
+#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
+#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
+
+#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
+#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
+
+#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
+
+	uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+	uint32_t active;
+	struct lpfc_idiag_cmd cmd;
+	struct lpfc_idiag_offset offset;
+	void *ptr_private;
+};
+#endif
+
+/* Mask for discovery_trace */
+#define LPFC_DISC_TRC_ELS_CMD		0x1	/* Trace ELS commands */
+#define LPFC_DISC_TRC_ELS_RSP		0x2	/* Trace ELS response */
+#define LPFC_DISC_TRC_ELS_UNSOL		0x4	/* Trace ELS rcv'ed   */
+#define LPFC_DISC_TRC_ELS_ALL		0x7	/* Trace ELS */
+#define LPFC_DISC_TRC_MBOX_VPORT	0x8	/* Trace vport MBOXs */
+#define LPFC_DISC_TRC_MBOX		0x10	/* Trace other MBOXs */
+#define LPFC_DISC_TRC_MBOX_ALL		0x18	/* Trace all MBOXs */
+#define LPFC_DISC_TRC_CT		0x20	/* Trace disc CT requests */
+#define LPFC_DISC_TRC_DSM		0x40    /* Trace DSM events */
+#define LPFC_DISC_TRC_RPORT		0x80    /* Trace rport events */
+#define LPFC_DISC_TRC_NODE		0x100   /* Trace ndlp state changes */
+
+#define LPFC_DISC_TRC_DISCOVERY		0xef    /* common mask for general
+						 * discovery */
+#endif /* _H_LPFC_DEBUG_FS */
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_disc.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_disc.h
new file mode 100644
index 0000000..1d84b63
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_disc.h
@@ -0,0 +1,265 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define FC_MAX_HOLD_RSCN     32	      /* max number of deferred RSCNs */
+#define FC_MAX_NS_RSP        64512    /* max size NameServer rsp */
+#define FC_MAXLOOP           126      /* max devices supported on a fc loop */
+#define LPFC_DISC_FLOGI_TMO  10	      /* Discovery FLOGI ratov */
+
+
+/* This is the protocol dependent definition for a Node List Entry.
+ * This is used by Fibre Channel protocol to support FCP.
+ */
+
+/* worker thread events */
+enum lpfc_work_type {
+	LPFC_EVT_ONLINE,
+	LPFC_EVT_OFFLINE_PREP,
+	LPFC_EVT_OFFLINE,
+	LPFC_EVT_WARM_START,
+	LPFC_EVT_KILL,
+	LPFC_EVT_ELS_RETRY,
+	LPFC_EVT_DEV_LOSS,
+	LPFC_EVT_FASTPATH_MGMT_EVT,
+	LPFC_EVT_RESET_HBA,
+};
+
+/* structure used to queue event to the discovery tasklet */
+struct lpfc_work_evt {
+	struct list_head      evt_listp;
+	void                 *evt_arg1;
+	void                 *evt_arg2;
+	enum lpfc_work_type   evt;
+};
+
+struct lpfc_scsi_check_condition_event;
+struct lpfc_scsi_varqueuedepth_event;
+struct lpfc_scsi_event_header;
+struct lpfc_fabric_event_header;
+struct lpfc_fcprdchkerr_event;
+
+/* structure used for sending events from fast path */
+struct lpfc_fast_path_event {
+	struct lpfc_work_evt work_evt;
+	struct lpfc_vport     *vport;
+	union {
+		struct lpfc_scsi_check_condition_event check_cond_evt;
+		struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
+		struct lpfc_scsi_event_header scsi_evt;
+		struct lpfc_fabric_event_header fabric_evt;
+		struct lpfc_fcprdchkerr_event read_check_error;
+	} un;
+};
+
+#define LPFC_SLI4_MAX_XRI	1024	/* Used to make the ndlp's xri_bitmap */
+#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
+struct lpfc_node_rrqs {
+	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
+};
+
+struct lpfc_nodelist {
+	struct list_head nlp_listp;
+	struct lpfc_name nlp_portname;
+	struct lpfc_name nlp_nodename;
+	uint32_t         nlp_flag;		/* entry  flags */
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	uint32_t         nlp_last_elscmd;	/* Last ELS cmd sent */
+	uint16_t         nlp_type;
+#define NLP_FC_NODE        0x1			/* entry is an FC node */
+#define NLP_FABRIC         0x4			/* entry rep a Fabric entity */
+#define NLP_FCP_TARGET     0x8			/* entry is an FCP target */
+#define NLP_FCP_INITIATOR  0x10			/* entry is an FCP Initiator */
+
+	uint16_t        nlp_rpi;
+	uint16_t        nlp_state;		/* state transition indicator */
+	uint16_t        nlp_prev_state;		/* state transition indicator */
+	uint16_t        nlp_xri;		/* output exchange id for RPI */
+	uint16_t        nlp_sid;		/* scsi id */
+#define NLP_NO_SID		0xffff
+	uint16_t	nlp_maxframe;		/* Max RCV frame size */
+	uint8_t		nlp_class_sup;		/* Supported Classes */
+	uint8_t         nlp_retry;		/* used for ELS retries */
+	uint8_t         nlp_fcp_info;	        /* class info, bits 0-3 */
+#define NLP_FCP_2_DEVICE   0x10			/* FCP-2 device */
+
+	uint16_t        nlp_usg_map;	/* ndlp management usage bitmap */
+#define NLP_USG_NODE_ACT_BIT	0x1	/* Indicate ndlp is actively used */
+#define NLP_USG_IACT_REQ_BIT	0x2	/* Request to inactivate ndlp */
+#define NLP_USG_FREE_REQ_BIT	0x4	/* Request to invoke ndlp memory free */
+#define NLP_USG_FREE_ACK_BIT	0x8	/* Indicate ndlp memory free invoked */
+
+	struct timer_list   nlp_delayfunc;	/* Used for delayed ELS cmds */
+	struct lpfc_hba *phba;
+	struct fc_rport *rport;			/* Corresponding FC transport
+						   port structure */
+	struct lpfc_vport *vport;
+	struct lpfc_work_evt els_retry_evt;
+	struct lpfc_work_evt dev_loss_evt;
+	struct kref     kref;
+	atomic_t cmd_pending;
+	uint32_t cmd_qdepth;
+	unsigned long last_change_time;
+	struct lpfc_node_rrqs active_rrqs;
+	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
+};
+struct lpfc_node_rrq {
+	struct list_head list;
+	uint16_t xritag;
+	uint16_t send_rrq;
+	uint16_t rxid;
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	unsigned long rrq_stop_time;
+};
+
+/* Defines for nlp_flag (uint32) */
+#define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
+#define NLP_REG_LOGIN_SEND 0x00000002   /* sent reglogin to adapter */
+#define NLP_PLOGI_SND      0x00000020	/* sent PLOGI request for this entry */
+#define NLP_PRLI_SND       0x00000040	/* sent PRLI request for this entry */
+#define NLP_ADISC_SND      0x00000080	/* sent ADISC request for this entry */
+#define NLP_LOGO_SND       0x00000100	/* sent LOGO request for this entry */
+#define NLP_RNID_SND       0x00000400	/* sent RNID request for this entry */
+#define NLP_ELS_SND_MASK   0x000007e0	/* sent ELS request for this entry */
+#define NLP_DEFER_RM       0x00010000	/* Remove this ndlp if no longer used */
+#define NLP_DELAY_TMO      0x00020000	/* delay timeout is running for node */
+#define NLP_NPR_2B_DISC    0x00040000	/* node is included in num_disc_nodes */
+#define NLP_RCV_PLOGI      0x00080000	/* Rcv'ed PLOGI from remote system */
+#define NLP_LOGO_ACC       0x00100000	/* Process LOGO after ACC completes */
+#define NLP_TGT_NO_SCSIID  0x00200000	/* good PRLI but no binding for scsid */
+#define NLP_ACC_REGLOGIN   0x01000000	/* Issue Reg Login after successful
+					   ACC */
+#define NLP_NPR_ADISC      0x02000000	/* Issue ADISC when dq'ed from
+					   NPR list */
+#define NLP_RM_DFLT_RPI    0x04000000	/* need to remove leftover dflt RPI */
+#define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
+#define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
+#define NLP_SC_REQ         0x20000000	/* Target requires authentication */
+#define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
+
+/* ndlp usage management macros */
+#define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
+						& NLP_USG_NODE_ACT_BIT) \
+					&& \
+					!((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_ACK_BIT))
+#define NLP_SET_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_NODE_ACT_BIT)
+#define NLP_INT_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						= NLP_USG_NODE_ACT_BIT)
+#define NLP_CLR_NODE_ACT(ndlp)		((ndlp)->nlp_usg_map \
+						&= ~NLP_USG_NODE_ACT_BIT)
+#define NLP_CHK_IACT_REQ(ndlp)          ((ndlp)->nlp_usg_map \
+						& NLP_USG_IACT_REQ_BIT)
+#define NLP_SET_IACT_REQ(ndlp)          ((ndlp)->nlp_usg_map \
+						|= NLP_USG_IACT_REQ_BIT)
+#define NLP_CHK_FREE_REQ(ndlp)		((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_REQ_BIT)
+#define NLP_SET_FREE_REQ(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_FREE_REQ_BIT)
+#define NLP_CHK_FREE_ACK(ndlp)		((ndlp)->nlp_usg_map \
+						& NLP_USG_FREE_ACK_BIT)
+#define NLP_SET_FREE_ACK(ndlp)		((ndlp)->nlp_usg_map \
+						|= NLP_USG_FREE_ACK_BIT)
+
+/* There are 4 different double linked lists nodelist entries can reside on.
+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
+ * when Link Up discovery or Registered State Change Notification (RSCN)
+ * processing is needed.  Each list holds the nodes that require a PLOGI or
+ * ADISC Extended Link Service (ELS) request.  These lists keep track of the
+ * nodes affected by an RSCN, or a Link Up (Typically, all nodes are affected
+ * by Link Up) event.  The unmapped_list contains all nodes that have
+ * successfully logged in at the Fibre Channel level.  The
+ * mapped_list will contain all nodes that are mapped FCP targets.
+ *
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+
+/* Defines for nlp_state */
+#define NLP_STE_UNUSED_NODE       0x0	/* node is just allocated */
+#define NLP_STE_PLOGI_ISSUE       0x1	/* PLOGI was sent to NL_PORT */
+#define NLP_STE_ADISC_ISSUE       0x2	/* ADISC was sent to NL_PORT */
+#define NLP_STE_REG_LOGIN_ISSUE   0x3	/* REG_LOGIN was issued for NL_PORT */
+#define NLP_STE_PRLI_ISSUE        0x4	/* PRLI was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x5	/* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x6	/* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x7	/* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x8
+#define NLP_STE_FREED_NODE        0xff	/* node entry was freed to MEM_NLP */
+
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to PRLI_COMPL. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list.  32 entries are processed initially and
+ * ADISC is initiated for each one.  Completions / Events for each node are
+ * funneled through the state machine.  As each node finishes ADISC processing, it
+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list.  32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funneled through the state machine.  As
+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
+/* Defines for Node List Entry Events that could happen */
+#define NLP_EVT_RCV_PLOGI         0x0	/* Rcv'd an ELS PLOGI command */
+#define NLP_EVT_RCV_PRLI          0x1	/* Rcv'd an ELS PRLI  command */
+#define NLP_EVT_RCV_LOGO          0x2	/* Rcv'd an ELS LOGO  command */
+#define NLP_EVT_RCV_ADISC         0x3	/* Rcv'd an ELS ADISC command */
+#define NLP_EVT_RCV_PDISC         0x4	/* Rcv'd an ELS PDISC command */
+#define NLP_EVT_RCV_PRLO          0x5	/* Rcv'd an ELS PRLO  command */
+#define NLP_EVT_CMPL_PLOGI        0x6	/* Sent an ELS PLOGI command */
+#define NLP_EVT_CMPL_PRLI         0x7	/* Sent an ELS PRLI  command */
+#define NLP_EVT_CMPL_LOGO         0x8	/* Sent an ELS LOGO  command */
+#define NLP_EVT_CMPL_ADISC        0x9	/* Sent an ELS ADISC command */
+#define NLP_EVT_CMPL_REG_LOGIN    0xa	/* REG_LOGIN mbox cmd completed */
+#define NLP_EVT_DEVICE_RM         0xb	/* Device not found in NS / ALPAmap */
+#define NLP_EVT_DEVICE_RECOVERY   0xc	/* Device existence unknown */
+#define NLP_EVT_MAX_EVENT         0xd
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_els.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_els.c
new file mode 100644
index 0000000..3407b39
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_els.c
@@ -0,0 +1,7898 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+/* See Fibre Channel protocol T11 FC-LS for details */
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+			  struct lpfc_iocbq *);
+static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
+			struct lpfc_iocbq *);
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
+static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
+				struct lpfc_nodelist *ndlp, uint8_t retry);
+static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
+				  struct lpfc_iocbq *iocb);
+
+static int lpfc_max_els_tries = 3;
+
+/**
+ * lpfc_els_chk_latt - Check host link attention event for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there is an outstanding host link
+ * attention event during the discovery process with the @vport. It is done
+ * by reading the HBA's Host Attention (HA) register. If there is any host
+ * link attention events during this @vport's discovery process, the @vport
+ * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
+ * be issued if the link state is not already in host link cleared state,
+ * and a return code shall indicate whether the host link attention event
+ * had happened.
+ *
+ * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
+ * state is already LPFC_VPORT_READY, the request for checking host link
+ * attention events will be ignored and a return code shall indicate no host
+ * link attention event had happened.
+ *
+ * Return codes
+ *   0 - no host link attention event happened
+ *   1 - host link attention event happened
+ **/
+int
+lpfc_els_chk_latt(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t ha_copy;
+
+	if (vport->port_state >= LPFC_VPORT_READY ||
+	    phba->link_state == LPFC_LINK_DOWN ||
+	    phba->sli_rev > LPFC_SLI_REV3)
+		return 0;
+
+	/* Read the HBA Host Attention Register */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
+
+	if (!(ha_copy & HA_LATT))
+		return 0;
+
+	/* Pending Link Event during Discovery */
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0237 Pending Link Event during "
+			 "Discovery: State x%x\n",
+			 phba->pport->port_state);
+
+	/* CLEAR_LA should re-enable link attention events and
+	 * we should then immediately take a LATT event. The
+	 * LATT processing should call lpfc_linkdown() which
+	 * will cleanup any left over in-progress discovery
+	 * events.
+	 */
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_ABORT_DISCOVERY;
+	spin_unlock_irq(shost->host_lock);
+
+	if (phba->link_state != LPFC_CLEAR_LA)
+		lpfc_issue_clear_la(phba, vport);
+
+	return 1;
+}
+
+/**
+ * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @expectRsp: flag indicating whether response is expected.
+ * @cmdSize: size of the ELS command.
+ * @retry: number of retries to the command IOCB when it fails.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: destination identifier.
+ * @elscmd: the ELS command code.
+ *
+ * This routine is used for allocating a lpfc-IOCB data structure from
+ * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
+ * passed into the routine for the discovery state machine to issue Extended
+ * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation and
+ * preparation routine used by all the discovery state machine routines; the
+ * ELS command-specific fields are set up later by the individual discovery
+ * routines after this routine has allocated and prepared the generic IOCB
+ * data structure. It fills in the
+ * Buffer Descriptor Entries (BDEs), allocates buffers for both command
+ * payload and response payload (if expected). The reference count on the
+ * ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ *   Pointer to the newly allocated/prepared els iocb data structure
+ *   NULL - when els iocb data structure allocation/preparation failed
+ **/
+struct lpfc_iocbq *
+lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+		   uint16_t cmdSize, uint8_t retry,
+		   struct lpfc_nodelist *ndlp, uint32_t did,
+		   uint32_t elscmd)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
+	struct ulp_bde64 *bpl;
+	IOCB_t *icmd;
+
+
+	if (!lpfc_is_link_up(phba))
+		return NULL;
+
+	/* Allocate buffer for  command iocb */
+	elsiocb = lpfc_sli_get_iocbq(phba);
+
+	if (elsiocb == NULL)
+		return NULL;
+
+	/*
+	 * If this command is for the fabric controller and the HBA is running
+	 * in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
+	 */
+	if ((did == Fabric_DID) &&
+		(phba->hba_flag & HBA_FIP_SUPPORT) &&
+		((elscmd == ELS_CMD_FLOGI) ||
+		 (elscmd == ELS_CMD_FDISC) ||
+		 (elscmd == ELS_CMD_LOGO)))
+		switch (elscmd) {
+		case ELS_CMD_FLOGI:
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+					& LPFC_FIP_ELS_ID_MASK);
+		break;
+		case ELS_CMD_FDISC:
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+					& LPFC_FIP_ELS_ID_MASK);
+		break;
+		case ELS_CMD_LOGO:
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+					& LPFC_FIP_ELS_ID_MASK);
+		break;
+		}
+	else
+		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
+
+	icmd = &elsiocb->iocb;
+
+	/* fill in BDEs for command */
+	/* Allocate buffer for command payload */
+	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (pcmd)
+		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
+	if (!pcmd || !pcmd->virt)
+		goto els_iocb_free_pcmb_exit;
+
+	INIT_LIST_HEAD(&pcmd->list);
+
+	/* Allocate buffer for response payload */
+	if (expectRsp) {
+		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (prsp)
+			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+						     &prsp->phys);
+		if (!prsp || !prsp->virt)
+			goto els_iocb_free_prsp_exit;
+		INIT_LIST_HEAD(&prsp->list);
+	} else
+		prsp = NULL;
+
+	/* Allocate buffer for Buffer ptr list */
+	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (pbuflist)
+		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+						 &pbuflist->phys);
+	if (!pbuflist || !pbuflist->virt)
+		goto els_iocb_free_pbuf_exit;
+
+	INIT_LIST_HEAD(&pbuflist->list);
+
+	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
+	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
+	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	icmd->un.elsreq64.remoteID = did;	/* DID */
+	if (expectRsp) {
+		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+		icmd->ulpTimeout = phba->fc_ratov * 2;
+	} else {
+		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+	}
+	icmd->ulpBdeCount = 1;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		icmd->un.elsreq64.myID = vport->fc_myDID;
+
+		/* For ELS_REQUEST64_CR, use the VPI by default */
+		icmd->ulpContext = phba->vpi_ids[vport->vpi];
+		icmd->ulpCt_h = 0;
+		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+		if (elscmd == ELS_CMD_ECHO)
+			icmd->ulpCt_l = 0; /* context = invalid RPI */
+		else
+			icmd->ulpCt_l = 1; /* context = VPI */
+	}
+
+	bpl = (struct ulp_bde64 *) pbuflist->virt;
+	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
+	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
+	bpl->tus.f.bdeSize = cmdSize;
+	bpl->tus.f.bdeFlags = 0;
+	bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+	if (expectRsp) {
+		bpl++;
+		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
+		bpl->tus.f.bdeSize = FCELSSIZE;
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+	}
+
+	/* prevent preparing iocb with NULL ndlp reference */
+	elsiocb->context1 = lpfc_nlp_get(ndlp);
+	if (!elsiocb->context1)
+		goto els_iocb_free_pbuf_exit;
+	elsiocb->context2 = pcmd;
+	elsiocb->context3 = pbuflist;
+	elsiocb->retry = retry;
+	elsiocb->vport = vport;
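+	/* Allow twice R_A_TOV plus the driver's own timeout pad
+	 * (LPFC_DRVR_TIMEOUT) before giving up on this command.
+	 */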
+	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
+
+	if (prsp) {
+		list_add(&prsp->list, &pcmd->list);
+	}
+	if (expectRsp) {
+		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0116 Xmit ELS command x%x to remote "
+				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
+				 elscmd, did, elsiocb->iotag,
+				 vport->port_state);
+	} else {
+		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0117 Xmit ELS response x%x to remote "
+				 "NPORT x%x I/O tag: x%x, size: x%x\n",
+				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
+				 cmdSize);
+	}
+	return elsiocb;
+
+els_iocb_free_pbuf_exit:
+	if (expectRsp)
+		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+	kfree(pbuflist);
+
+els_iocb_free_prsp_exit:
+	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+	kfree(prsp);
+
+els_iocb_free_pcmb_exit:
+	kfree(pcmd);
+	lpfc_sli_release_iocbq(phba, elsiocb);
+	return NULL;
+}
+
+/**
+ * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a fabric registration login for a @vport. An
+ * active ndlp node with Fabric_DID must already exist for this @vport.
+ * The routine invokes two mailbox commands to carry out fabric registration
+ * login through the HBA firmware: the first mailbox command requests the
+ * HBA to perform link configuration for the @vport; and the second mailbox
+ * command requests the HBA to perform the actual fabric registration login
+ * with the @vport.
+ *
+ * Return code
+ *   0 - successfully issued fabric registration login for @vport
+ *   -ENXIO -- failed to issue fabric registration login for @vport
+ **/
+int
+lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	int rc;
+	int err = 0;
+
+	sp = &phba->fc_fabparam;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		err = 1;
+		goto fail;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		err = 2;
+		goto fail;
+	}
+
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	lpfc_config_link(phba, mbox);
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->vport = vport;
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		err = 3;
+		goto fail_free_mbox;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		err = 4;
+		goto fail;
+	}
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+			  ndlp->nlp_rpi);
+	if (rc) {
+		err = 5;
+		goto fail_free_mbox;
+	}
+
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+	mbox->vport = vport;
+	/* increment the reference count on ndlp to hold reference
+	 * for the callback routine.
+	 */
+	mbox->context2 = lpfc_nlp_get(ndlp);
+
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		err = 6;
+		goto fail_issue_reg_login;
+	}
+
+	return 0;
+
+fail_issue_reg_login:
+	/* decrement the reference count on ndlp just incremented
+	 * for the failed mbox command.
+	 */
+	lpfc_nlp_put(ndlp);
+	mp = (struct lpfc_dmabuf *) mbox->context1;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+fail_free_mbox:
+	mempool_free(mbox, phba->mbox_mem_pool);
+
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0249 Cannot issue Register Fabric login: Err %d\n", err);
+	return -ENXIO;
+}
+
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_nodelist *ndlp;
+	struct serv_parm *sp;
+	struct lpfc_dmabuf *dmabuf;
+	int rc = 0;
+
+	sp = &phba->fc_fabparam;
+	/* move forward in case of SLI4 FC port loopback test */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->link_flag & LS_LOOPBACK_MODE)) {
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+			rc = -ENODEV;
+			goto fail;
+		}
+	}
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+	if (!dmabuf->virt) {
+		rc = -ENOMEM;
+		goto fail_free_dmabuf;
+	}
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto fail_free_coherent;
+	}
+	vport->port_state = LPFC_FABRIC_CFG_LINK;
+	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+	mboxq->vport = vport;
+	mboxq->context1 = dmabuf;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = -ENXIO;
+		goto fail_free_mbox;
+	}
+	return 0;
+
+fail_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+	kfree(dmabuf);
+fail:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+		"0289 Issue Register VFI failed: Err %d\n", rc);
+	return rc;
+}
+
+/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 port only.
+ *
+ * Return code
+ *   0 - successfully issued UNREG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct Scsi_Host *shost;
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2556 UNREG_VFI mbox allocation failed "
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+
+	lpfc_unreg_vfi(mboxq, vport);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2557 UNREG_VFI issue mbox failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	shost = lpfc_shost_from_vport(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+	return 0;
+}
+
+/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from FLOGI/FDISC completion handler functions.
+ * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
+ * Fabric nodename has changed in the completion service parameters, and 0
+ * otherwise. This function also sets a flag in the vport data structure to
+ * delay N_Port discovery after the FLOGI/FDISC completion if the Clean
+ * Address bit in the FLOGI/FDISC response is cleared and the FCID, Fabric
+ * portname or Fabric nodename has changed.
+ *
+ * Return code
+ *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
+ *   1 - FCID or Fabric Nodename or Fabric portname is changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+		struct serv_parm *sp)
+{
+	uint8_t fabric_param_changed = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if ((vport->fc_prevDID != vport->fc_myDID) ||
+		memcmp(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name)) ||
+		memcmp(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name)))
+		fabric_param_changed = 1;
+
+	/*
+	 * Word 1 Bit 31 in common service parameter is overloaded.
+	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
+	 * Word 1 Bit 31 in FLOGI response is clean address bit
+	 *
+	 * If fabric parameter is changed and clean address bit is
+	 * cleared delay nport discovery if
+	 * - vport->fc_prevDID != 0 (not initial discovery) OR
+	 * - lpfc_delay_discovery module parameter is set.
+	 */
+	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+	    (vport->fc_prevDID || lpfc_delay_discovery)) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_DISC_DELAYED;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	return fabric_param_changed;
+}
+
+
+/**
+ * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into a fabric
+ * port in a fabric topology. It properly sets up the parameters to the @ndlp
+ * from the IOCB response. It also checks the newly assigned N_Port ID of the
+ * @vport against the previously assigned N_Port ID. If it is different from
+ * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
+ * is invoked on all the remaining nodes with the @vport to unregister the
+ * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ *   0 - Success (currently, always return 0)
+ **/
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   struct serv_parm *sp, IOCB_t *irsp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_nodelist *np;
+	struct lpfc_nodelist *next_np;
+	uint8_t fabric_param_changed;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_FABRIC;
+	spin_unlock_irq(shost->host_lock);
+
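+	/* Convert E_D_TOV from nanoseconds to milliseconds (rounding up) when
+	 * the resolution bit is set, and scale R_A_TOV down by a factor of
+	 * 1000, also rounding up.
+	 */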
+	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
+	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
+		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+
+	phba->fc_edtovResol = sp->cmn.edtovResolution;
+	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PUBLIC_LOOP;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
+	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+				sp->cmn.bbRcvSizeLsb;
+
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	memcpy(&vport->fabric_portname, &sp->portName,
+			sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+			sizeof(struct lpfc_name));
+	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
+
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		if (sp->cmn.response_multiple_NPort) {
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_ELS | LOG_VPORT,
+					 "1816 FLOGI NPIV supported, "
+					 "response data 0x%x\n",
+					 sp->cmn.response_multiple_NPort);
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			/* Because we asked f/w for NPIV, it still expects us
+			 * to call reg_vnpid at least for the physical host */
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_ELS | LOG_VPORT,
+					 "1817 Fabric does not support NPIV "
+					 "- configuring single port mode.\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+			spin_unlock_irq(&phba->hbalock);
+		}
+	}
+
+	if (fabric_param_changed &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+
+		/* If our NportID changed, we need to ensure all
+		 * remaining NPORTs get unreg_login'ed.
+		 */
+		list_for_each_entry_safe(np, next_np,
+					&vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(np))
+				continue;
+			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+				   !(np->nlp_flag & NLP_NPR_ADISC))
+				continue;
+			spin_lock_irq(shost->host_lock);
+			np->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_unreg_rpi(vport, np);
+		}
+		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_mbx_unreg_vpi(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			/*
+			 * If the VPI is unregistered, the driver needs to
+			 * issue INIT_VPI before re-registering.
+			 */
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			spin_unlock_irq(shost->host_lock);
+		}
+	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+			/*
+			 * Driver needs to re-reg VPI in order for f/w
+			 * to update the MAC address.
+			 */
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+			lpfc_register_new_vport(phba, vport, ndlp);
+			return 0;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+			lpfc_register_new_vport(phba, vport, ndlp);
+		else
+			lpfc_issue_fabric_reglogin(vport);
+	} else {
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
+			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		} else if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_init_vpi(vport);
+		else {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3135 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
+			lpfc_issue_reg_vfi(vport);
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
+ * in a point-to-point topology. First, the @vport's N_Port Name is compared
+ * with the received N_Port Name: if the @vport's N_Port Name is greater than
+ * the received N_Port Name lexicographically, this node shall assign local
+ * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
+ * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
+ * this node shall just wait for the remote node to issue PLOGI and assign
+ * N_Port IDs.
+ *
+ * Return code
+ *   0 - Success
+ *   -ENXIO - Fail
+ **/
+static int
+lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  struct serv_parm *sp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+	spin_unlock_irq(shost->host_lock);
+
+	phba->fc_edtov = FF_DEF_EDTOV;
+	phba->fc_ratov = FF_DEF_RATOV;
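+	/* Compare port names: the side with the lexicographically greater
+	 * (or equal) WWPN initiates the PLOGI and assigns the N_Port IDs.
+	 */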
+	rc = memcmp(&vport->fc_portname, &sp->portName,
+		    sizeof(vport->fc_portname));
+	if (rc >= 0) {
+		/* This side will initiate the PLOGI */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PT2PT_PLOGI;
+		spin_unlock_irq(shost->host_lock);
+
+		/*
+		 * N_Port ID cannot be 0; set ours to LocalID and the
+		 * other side will be RemoteID.
+		 */
+
+		/* not equal */
+		if (rc)
+			vport->fc_myDID = PT2PT_LocalID;
+
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox)
+			goto fail;
+
+		lpfc_config_link(phba, mbox);
+
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->vport = vport;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			goto fail;
+		}
+		/* Decrement ndlp reference count indicating that ndlp can be
+		 * safely released when other references to it are done.
+		 */
+		lpfc_nlp_put(ndlp);
+
+		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
+		if (!ndlp) {
+			/*
+			 * Cannot find existing Fabric ndlp, so allocate a
+			 * new one
+			 */
+			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+			if (!ndlp)
+				goto fail;
+			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
+		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+			if (!ndlp)
+				goto fail;
+		}
+
+		memcpy(&ndlp->nlp_portname, &sp->portName,
+		       sizeof(struct lpfc_name));
+		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
+		       sizeof(struct lpfc_name));
+		/* Set state will put ndlp onto node list if not already done */
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+	} else
+		/* This side will wait for the PLOGI, decrement ndlp reference
+		 * count indicating that ndlp can be released when other
+		 * references to it are done.
+		 */
+		lpfc_nlp_put(ndlp);
+
+	/* If we are pt2pt with another NPort, force NPIV off! */
+	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_PT2PT;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Start discovery - this should just do CLEAR_LA */
+	lpfc_disc_start(vport);
+	return 0;
+fail:
+	return -ENXIO;
+}
+
+/**
+ * lpfc_cmpl_els_flogi - Completion callback function for flogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the top-level completion callback function for issuing
+ * a Fabric Login (FLOGI) command. If the response IOCB reported error,
+ * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
+ * a retry has been made (either immediately or delayed, with lpfc_els_retry()
+ * returning 1), the command IOCB is released and the function returns.
+ * If the retry attempt has been given up (possibly after reaching the maximum
+ * number of retries), one additional decrement of the ndlp reference is
+ * made before going out after releasing the command IOCB. This will
+ * actually release the remote node (Note, lpfc_els_free_iocb() will also
+ * invoke one decrement of ndlp reference count). If no error reported in
+ * the IOCB status, the command Port ID field is used to determine whether
+ * this is a point-to-point topology or a fabric topology: if the Port ID
+ * field is assigned, it is a fabric topology; otherwise, it is a
+ * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
+ * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
+ * specific topology completion conditions.
+ **/
+static void
+lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_nodelist *ndlp = cmdiocb->context1;
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+	struct serv_parm *sp;
+	uint16_t fcf_index;
+	int rc;
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		/* One additional decrement on node reference count to
+		 * trigger the release of the node
+		 */
+		lpfc_nlp_put(ndlp);
+		goto out;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"FLOGI cmpl:      status:x%x/x%x state:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		vport->port_state);
+
+	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform roundrobin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			if (phba->link_state < LPFC_LINK_UP)
+				goto stop_rr_fcf_flogi;
+			if ((phba->fcoe_cvl_eventtag_attn ==
+			     phba->fcoe_cvl_eventtag) &&
+			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+			    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+				goto stop_rr_fcf_flogi;
+			else
+				phba->fcoe_cvl_eventtag_attn =
+					phba->fcoe_cvl_eventtag;
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on FCF (x%x), "
+					"status:x%x/x%x, tmo:x%x, perform "
+					"roundrobin FCF failover\n",
+					phba->fcf.current_rec.fcf_indx,
+					irsp->ulpStatus, irsp->un.ulpWord[4],
+					irsp->ulpTimeout);
+			lpfc_sli4_set_fcf_flogi_fail(phba,
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+			if (rc)
+				goto out;
+		}
+
+stop_rr_fcf_flogi:
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				irsp->ulpStatus, irsp->un.ulpWord[4],
+				irsp->ulpTimeout);
+
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+			goto out;
+
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpTimeout);
+
+		/* FLOGI failed, so there is no fabric */
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+
+		/* If private loop, then allow max outstanding els to be
+		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
+		 * alpa map would take too long otherwise.
+		 */
+		if (phba->alpa_map[0] == 0)
+			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+		     (vport->fc_prevDID != vport->fc_myDID))) {
+			if (vport->fc_flag & FC_VFI_REGISTERED)
+				lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_issue_reg_vfi(vport);
+			lpfc_nlp_put(ndlp);
+			goto out;
+		}
+		goto flogifail;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	/*
+	 * The FLogI succeeded.  Sync the data for the CPU before
+	 * accessing it.
+	 */
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+
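+	/* Service parameters follow the 4-byte ELS command code in the
+	 * response payload.
+	 */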
+	sp = prsp->virt + sizeof(uint32_t);
+
+	/* FLOGI completes successfully */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0101 FLOGI completes successfully "
+			 "Data: x%x x%x x%x x%x\n",
+			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+
+	if (vport->port_state == LPFC_FLOGI) {
+		/*
+		 * If Common Service Parameters indicate Nport
+		 * we are point-to-point; if Fport, we are Fabric.
+		 */
+		if (sp->cmn.fPort)
+			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+		else if (!(phba->hba_flag & HBA_FCOE_MODE))
+			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
+		else {
+			lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_FIP | LOG_ELS,
+				"2831 FLOGI response with cleared Fabric "
+				"bit fcf_index 0x%x "
+				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
+				"Fabric Name "
+				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				phba->fcf.current_rec.fcf_indx,
+				phba->fcf.current_rec.switch_name[0],
+				phba->fcf.current_rec.switch_name[1],
+				phba->fcf.current_rec.switch_name[2],
+				phba->fcf.current_rec.switch_name[3],
+				phba->fcf.current_rec.switch_name[4],
+				phba->fcf.current_rec.switch_name[5],
+				phba->fcf.current_rec.switch_name[6],
+				phba->fcf.current_rec.switch_name[7],
+				phba->fcf.current_rec.fabric_name[0],
+				phba->fcf.current_rec.fabric_name[1],
+				phba->fcf.current_rec.fabric_name[2],
+				phba->fcf.current_rec.fabric_name[3],
+				phba->fcf.current_rec.fabric_name[4],
+				phba->fcf.current_rec.fabric_name[5],
+				phba->fcf.current_rec.fabric_name[6],
+				phba->fcf.current_rec.fabric_name[7]);
+			lpfc_nlp_put(ndlp);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+			spin_unlock_irq(&phba->hbalock);
+			goto out;
+		}
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			if (phba->hba_flag & HBA_FIP_SUPPORT)
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
+						LOG_ELS,
+						"2769 FLOGI to FCF (x%x) "
+						"completed successfully\n",
+						phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
+			spin_unlock_irq(&phba->hbalock);
+			goto out;
+		}
+	}
+
+flogifail:
+	lpfc_nlp_put(ndlp);
+
+	if (!lpfc_error_lost_link(irsp)) {
+		/* FLOGI failed, so just use loop map to make discovery list */
+		lpfc_disc_list_loopmap(vport);
+
+		/* Start discovery */
+		lpfc_disc_start(vport);
+	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
+			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
+			(phba->link_state != LPFC_CLEAR_LA)) {
+		/* If FLOGI failed enable link interrupt. */
+		lpfc_issue_clear_la(phba, vport);
+	}
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fabric Login (FLOGI) Request ELS command
+ * for a @vport. The initiator service parameters are put into the payload
+ * of the FLOGI Request IOCB and the top-level callback function pointer
+ * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
+ * function field. The lpfc_issue_fabric_iocb routine is invoked to send
+ * out FLOGI ELS command with one outstanding fabric IOCB at a time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FLOGI ELS command.
+ *
+ * Return code
+ *   0 - successfully issued flogi iocb for @vport
+ *   1 - failed to issue flogi iocb for @vport
+ **/
+static int
+lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct serv_parm *sp;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli_ring *pring;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	uint32_t tmo;
+	int rc;
+
+	pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_FLOGI);
+
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For FLOGI request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+
+	/* Setup CSPs accordingly for Fabric */
+	sp->cmn.e_d_tov = 0;
+	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
+	sp->cls1.classValid = 0;
+	sp->cls2.seqDelivery = 1;
+	sp->cls3.seqDelivery = 1;
+	if (sp->cmn.fcphLow < FC_PH3)
+		sp->cmn.fcphLow = FC_PH3;
+	if (sp->cmn.fcphHigh < FC_PH3)
+		sp->cmn.fcphHigh = FC_PH3;
+
+	if  (phba->sli_rev == LPFC_SLI_REV4) {
+		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+		    LPFC_SLI_INTF_IF_TYPE_0) {
+			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+			/* FLOGI needs to be 3 for WQE FCFI */
+			/* Set the fcfi to the fcfi we registered with */
+			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+		}
+	} else {
+		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+			sp->cmn.request_multiple_Nport = 1;
+			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+			icmd->ulpCt_h = 1;
+			icmd->ulpCt_l = 0;
+		} else
+			sp->cmn.request_multiple_Nport = 0;
+	}
+
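+	/* For non-loop topologies, send FLOGI with a zero source N_Port ID */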
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+		icmd->un.elsreq64.myID = 0;
+		icmd->un.elsreq64.fl = 1;
+	}
+
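+	/* Temporarily use the FLOGI discovery timeout while arming the
+	 * discovery timer, then restore the saved R_A_TOV.
+	 */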
+	tmo = phba->fc_ratov;
+	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
+	lpfc_set_disctmo(vport);
+	phba->fc_ratov = tmo;
+
+	phba->fc_stat.elsXmitFLOGI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FLOGI:     opt:x%x",
+		phba->sli3_options, 0, 0);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB command on each outstanding IOCB that
+ * contains an active Fabric_DID ndlp. Note that this function is to issue
+ * the abort IOCB command on all the outstanding IOCBs, thus when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ *
+ * Return code
+ *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
+ **/
+int
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+	struct lpfc_nodelist *ndlp;
+	IOCB_t *icmd;
+
+	/* Abort outstanding I/O on NPort <nlp_DID> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+			"0201 Abort outstanding I/O on NPort x%x\n",
+			Fabric_DID);
+
+	pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	/*
+	 * Check the txcmplq for an iocb that matches the nport the driver is
+	 * searching for.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		icmd = &iocb->iocb;
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+			ndlp = (struct lpfc_nodelist *)(iocb->context1);
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_DID == Fabric_DID))
+				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return 0;
+}
+
+/**
+ * lpfc_initial_flogi - Issue an initial fabric login for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Login (FLOGI) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
+ * is then invoked with the @vport and the ndlp to perform the FLOGI for the
+ * @vport.
+ *
+ * Return code
+ *   0 - failed to issue initial flogi for @vport
+ *   1 - successfully issued initial flogi for @vport
+ **/
+int
+lpfc_initial_flogi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nodelist *ndlp;
+
+	vport->port_state = LPFC_FLOGI;
+	lpfc_set_disctmo(vport);
+
+	/* First look for the Fabric ndlp */
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 0;
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 0;
+	}
+
+	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
+		/* This decrement of reference count to node shall kick off
+		 * the release of the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Discover (FDISC) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
+ * is then invoked with the @vport and the ndlp to perform the FDISC for the
+ * @vport.
+ *
+ * Return code
+ *   0 - failed to issue initial fdisc for @vport
+ *   1 - successfully issued initial fdisc for @vport
+ **/
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nodelist *ndlp;
+
+	/* First look for the Fabric ndlp */
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 0;
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 0;
+	}
+
+	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+		/* decrement node reference count to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * lpfc_more_plogi - Check and issue remaining plogis for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there are more remaining Port Logins
+ * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
+ * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
+ * to issue ELS PLOGIs up to the configured discover threads with the
+ * @vport (@vport->cfg_discovery_threads). The function also decrements
+ * the @vport's num_disc_nodes by 1 if it is not already 0.
+ **/
+void
+lpfc_more_plogi(struct lpfc_vport *vport)
+{
+	int sentplogi;
+
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+
+	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0232 Continue discovery with %d PLOGIs to go "
+			 "Data: x%x x%x x%x\n",
+			 vport->num_disc_nodes, vport->fc_plogi_cnt,
+			 vport->fc_flag, vport->port_state);
+	/* Check to see if there are more PLOGIs to be sent */
+	if (vport->fc_flag & FC_NLP_MORE)
+		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+		sentplogi = lpfc_els_disc_plogi(vport);
+
+	return;
+}
+
+/**
+ * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
+ * @phba: pointer to lpfc hba data structure.
+ * @prsp: pointer to response IOCB payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine checks and indicates whether the WWPN of an N_Port, retrieved
+ * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
+ * The following cases are considered N_Port confirmed:
+ * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
+ * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
+ * it does not have WWPN assigned either. If the WWPN is confirmed, the
+ * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
+ * 1) if there is a node on vport list other than the @ndlp with the same
+ * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
+ * on that node to release the RPI associated with the node; 2) if there is
+ * no node found on vport list with the same WWPN of the N_Port PLOGI logged
+ * into, a new node shall be allocated (or activated). In either case, the
+ * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
+ * be released and the new_ndlp shall be put on to the vport node list and
+ * its pointer returned as the confirmed node.
+ *
+ * Note that before the @ndlp got "released", the keepDID from not-matching
+ * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
+ * of the @ndlp. This is because the release of @ndlp is actually to put it
+ * into an inactive state on the vport node list and the vport node list
+ * management algorithm does not allow two nodes with the same DID.
+ *
+ * Return code
+ *   pointer to the PLOGI N_Port @ndlp
+ **/
+static struct lpfc_nodelist *
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
+			 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport    *vport = ndlp->vport;
+	struct lpfc_nodelist *new_ndlp;
+	struct lpfc_rport_data *rdata;
+	struct fc_rport *rport;
+	struct serv_parm *sp;
+	uint8_t  name[sizeof(struct lpfc_name)];
+	uint32_t rc, keepDID = 0;
+	int  put_node;
+	int  put_rport;
+	struct lpfc_node_rrqs rrq;
+
+	/* Fabric nodes can have the same WWPN so we don't bother searching
+	 * by WWPN.  Just return the ndlp that was given to us.
+	 */
+	if (ndlp->nlp_type & NLP_FABRIC)
+		return ndlp;
+
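+	/* Service parameters follow the 4-byte ELS command code in the
+	 * PLOGI response payload.
+	 */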
+	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
+	memset(name, 0, sizeof(struct lpfc_name));
+
+	/* Now we find out if the NPort we are logging into, matches the WWPN
+	 * we have for that ndlp. If not, we have some work to do.
+	 */
+	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
+
+	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
+		return ndlp;
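+	/* Start with a zeroed local copy of the RRQ XRI bitmap */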
+	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
+
+	if (!new_ndlp) {
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
+		if (!rc)
+			return ndlp;
+		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
+		if (!new_ndlp)
+			return ndlp;
+		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
+	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
+		if (!rc)
+			return ndlp;
+		new_ndlp = lpfc_enable_node(vport, new_ndlp,
+						NLP_STE_UNUSED_NODE);
+		if (!new_ndlp)
+			return ndlp;
+		keepDID = new_ndlp->nlp_DID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	} else {
+		keepDID = new_ndlp->nlp_DID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	}
+
+	lpfc_unreg_rpi(vport, new_ndlp);
+	new_ndlp->nlp_DID = ndlp->nlp_DID;
+	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		memcpy(new_ndlp->active_rrqs.xri_bitmap,
+			&ndlp->active_rrqs.xri_bitmap,
+			sizeof(ndlp->active_rrqs.xri_bitmap));
+
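+	/* Carry the pending-discovery flag over to the node being kept */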
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
+		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+
+	/* Set state will put new_ndlp on to node list if not already done */
+	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
+
+	/* Move this back to NPR state */
+	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+		/* The new_ndlp is replacing ndlp totally, so we need
+		 * to put ndlp on UNUSED list and try to free it.
+		 */
+
+		/* Fix up the rport accordingly */
+		rport =  ndlp->rport;
+		if (rport) {
+			rdata = rport->dd_data;
+			if (rdata->pnode == ndlp) {
+				lpfc_nlp_put(ndlp);
+				ndlp->rport = NULL;
+				rdata->pnode = lpfc_nlp_get(new_ndlp);
+				new_ndlp->rport = rport;
+			}
+			new_ndlp->nlp_type = ndlp->nlp_type;
+		}
+		/* We shall actually free the ndlp with both nlp_DID and
+		 * nlp_portname fields equal to 0, to avoid leaving an
+		 * unusable ndlp on the nodelist.
+		 */
+		if (ndlp->nlp_DID == 0) {
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+		}
+
+		/* Two ndlps cannot have the same did on the nodelist */
+		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
+		lpfc_drop_node(vport, ndlp);
+	} else {
+		lpfc_unreg_rpi(vport, ndlp);
+		/* Two ndlps cannot have the same did */
+		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
+		/* Since we are swapping the ndlp passed in with the new one
+		 * and the did has already been swapped, copy over the
+		 * state and names.
+		 */
+		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
+			sizeof(struct lpfc_name));
+		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
+			sizeof(struct lpfc_name));
+		new_ndlp->nlp_state = ndlp->nlp_state;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		/* Fix up the rport accordingly */
+		rport = ndlp->rport;
+		if (rport) {
+			rdata = rport->dd_data;
+			put_node = rdata->pnode != NULL;
+			put_rport = ndlp->rport != NULL;
+			rdata->pnode = NULL;
+			ndlp->rport = NULL;
+			if (put_node)
+				lpfc_nlp_put(ndlp);
+			if (put_rport)
+				put_device(&rport->dev);
+		}
+	}
+	return new_ndlp;
+}
+
+/**
+ * lpfc_end_rscn - Check and handle more rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether more Registration State Change
+ * Notifications (RSCNs) came in while the discovery state machine was in
+ * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
+ * invoked to handle the additional RSCNs for the @vport. Otherwise, the
+ * FC_RSCN_MODE bit will be cleared on the @vport to mark the end of
+ * handling the RSCNs.
+ **/
+void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (vport->fc_flag & FC_RSCN_MODE) {
+		/*
+		 * Check to see if more RSCNs came in while we were
+		 * processing this one.
+		 */
+		if (vport->fc_rscn_id_cnt ||
+		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+			lpfc_els_handle_rscn(vport);
+		else {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+}
+
+/**
+ * lpfc_cmpl_els_rrq - Completion handler for els RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine will call the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
+ * exist then the clear_rrq is still called because the rrq needs to
+ * be freed.
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_node_rrq *rrq;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	rrq = cmdiocb->context_un.rrq;
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"RRQ cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2882 RRQ completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* rrq completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2880 RRQ completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		/* RRQ failed Don't print the vport to vport rjts */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+	}
+out:
+	if (rrq)
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+/**
+ * lpfc_cmpl_els_plogi - Completion callback function for plogi
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
+ * ignored and command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditions. If there is an error status reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
+ * PLOGIs.
+ **/
+static void
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *prsp;
+	int disc, rc, did, type;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PLOGI cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0136 PLOGI completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* Since ndlp can be freed in the disc state machine, note if this node
+	 * is being used during discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	spin_unlock_irq(shost->host_lock);
+	rc   = 0;
+
+	/* PLOGI completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0102 PLOGI completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		goto out;
+	}
+
+	/* ndlp could be freed in DSM, save these values now */
+	type = ndlp->nlp_type;
+	did = ndlp->nlp_DID;
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			if (disc) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+				spin_unlock_irq(shost->host_lock);
+			}
+			goto out;
+		}
+		/* PLOGI failed Don't print the vport to vport rjts */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp))
+			rc = NLP_STE_FREED_NODE;
+		else
+			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						     NLP_EVT_CMPL_PLOGI);
+	} else {
+		/* Good status, call state machine */
+		prsp = list_entry(((struct lpfc_dmabuf *)
+				   cmdiocb->context2)->list.next,
+				  struct lpfc_dmabuf, list);
+		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					     NLP_EVT_CMPL_PLOGI);
+	}
+
+	if (disc && vport->num_disc_nodes) {
+		/* Check to see if there are more PLOGIs to be sent */
+		lpfc_more_plogi(vport);
+
+		if (vport->num_disc_nodes == 0) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_NDISC_ACTIVE;
+			spin_unlock_irq(shost->host_lock);
+
+			lpfc_can_disctmo(vport);
+			lpfc_end_rscn(vport);
+		}
+	}
+
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: destination port identifier.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Port Login (PLOGI) command to a remote N_Port
+ * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+ * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+ * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PLOGI ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued a plogi for @vport
+ *   1 - failed to issue a plogi for @vport
+ **/
+int
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct serv_parm *sp;
+	IOCB_t *icmd;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+	psli = &phba->sli;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
+		ndlp = NULL;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+				     ELS_CMD_PLOGI);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For PLOGI request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+
+	/*
+	 * If we are an N_Port connected to a Fabric, fix up parameters so logins
+	 * to devices on remote loops work.
+	 */
+	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+		sp->cmn.altBbCredit = 1;
+
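+	/* Advertise an FC-PH version range of at least FC-PH 4.3 to FC-PH-3 */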
+	if (sp->cmn.fcphLow < FC_PH_4_3)
+		sp->cmn.fcphLow = FC_PH_4_3;
+
+	if (sp->cmn.fcphHigh < FC_PH3)
+		sp->cmn.fcphHigh = FC_PH3;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PLOGI:     did:x%x",
+		did, 0, 0);
+
+	phba->fc_stat.elsXmitPLOGI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_prli - Completion callback function for prli
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Process Login
+ * (PRLI) ELS command. The PRLI response IOCB status is checked for error
+ * status. If there is error status reported, PRLI retry shall be attempted
+ * by invoking the lpfc_els_retry() routine. Otherwise, the state
+ * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
+ * ndlp to mark the PRLI completion.
+ **/
+static void
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_sli *psli;
+	struct lpfc_nodelist *ndlp;
+
+	psli = &phba->sli;
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_PRLI_SND;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PRLI cmpl:       status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+	/* PRLI completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0103 PRLI completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, vport->num_disc_nodes);
+
+	vport->fc_prli_sent--;
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport))
+		goto out;
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			goto out;
+		}
+		/* PRLI failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp))
+			goto out;
+		else
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_PRLI);
+	} else
+		/* Good status, call state machine */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_PRLI);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_prli - Issue a prli iocb command for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI ELS command.
+ *
+ * Return code
+ *   0 - successfully issued prli iocb command for @vport
+ *   1 - failed to issue prli iocb command for @vport
+ **/
+int
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		    uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
+	PRLI *npr;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_PRLI);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For PRLI request, remainder of payload is service parameters */
+	memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
+	*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
+	pcmd += sizeof(uint32_t);
+
+	/* For PRLI, remainder of payload is PRLI parameter page */
+	npr = (PRLI *) pcmd;
+	/*
+	 * If our firmware version is 3.20 or later,
+	 * set the following bits for FC-TAPE support.
+	 */
+	if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+		npr->ConfmComplAllowed = 1;
+		npr->Retry = 1;
+		npr->TaskRetryIdReq = 1;
+	}
+	npr->estabImagePair = 1;
+	npr->readXferRdyDis = 1;
+
+	/* For FCP support */
+	npr->prliType = PRLI_FCP_TYPE;
+	npr->initiatorFunc = 1;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PRLI:      did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitPRLI++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
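+	/* Flag the PRLI as outstanding to this node before issuing it */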
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_PRLI_SND;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_PRLI_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	vport->fc_prli_sent++;
+	return 0;
+}
+
+/**
+ * lpfc_rscn_disc - Perform rscn discovery for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine performs Registration State Change Notification (RSCN)
+ * discovery for a @vport. If the @vport's node port recovery count is not
+ * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
+ * the nodes that need recovery. If no PLOGIs were needed through
+ * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
+ * invoked to check for and handle any additional RSCNs that came in while
+ * the current ones were being processed.
+ **/
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+	lpfc_can_disctmo(vport);
+
+	/* RSCN discovery */
+	/* go thru NPR nodes and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		if (lpfc_els_disc_plogi(vport))
+			return;
+
+	lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done - Complete the adisc phase of discovery
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+static void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+
+	/*
+	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
+	 * and continue discovery.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		lpfc_issue_reg_vpi(phba, vport);
+		return;
+	}
+	/*
+	 * For SLI2, we need to set port_state to READY
+	 * and continue discovery.
+	 */
+	if (vport->port_state < LPFC_VPORT_READY) {
+		/* If we get here, there is nothing to ADISC */
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_issue_clear_la(phba, vport);
+		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+			vport->num_disc_nodes = 0;
+			/* go thru NPR list, issue ELS PLOGIs */
+			if (vport->fc_npr_cnt)
+				lpfc_els_disc_plogi(vport);
+			if (!vport->num_disc_nodes) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	} else
+		lpfc_rscn_disc(vport);
+}
+
+/**
+ * lpfc_more_adisc - Issue more adisc as needed
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine determines whether there are more ndlps on a @vport's
+ * node list that need to have Address Discover (ADISC) issued. If so, it will
+ * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
+ * remaining nodes which need to have ADISC sent.
+ **/
+void
+lpfc_more_adisc(struct lpfc_vport *vport)
+{
+	int sentadisc;
+
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
+	/* Continue discovery with <num_disc_nodes> ADISCs to go */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0210 Continue discovery with %d ADISCs to go "
+			 "Data: x%x x%x x%x\n",
+			 vport->num_disc_nodes, vport->fc_adisc_cnt,
+			 vport->fc_flag, vport->port_state);
+	/* Check to see if there are more ADISCs to be sent */
+	if (vport->fc_flag & FC_NLP_MORE) {
+		lpfc_set_disctmo(vport);
+		/* go thru NPR nodes and issue any remaining ELS ADISCs */
+		sentadisc = lpfc_els_disc_adisc(vport);
+	}
+	if (!vport->num_disc_nodes)
+		lpfc_adisc_done(vport);
+	return;
+}
+
+/**
+ * lpfc_cmpl_els_adisc - Completion callback function for adisc
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the Address Discover
+ * (ADISC) command. It first checks to see whether link went down during
+ * the discovery process. If so, the node will be marked as node port
+ * recovery for issuing discover IOCB by the link attention handler and
+ * exit. Otherwise, the response status is checked. If error was reported
+ * in the response status, the ADISC command shall be retried by invoking
+ * the lpfc_els_retry() routine. Otherwise, if no error was reported in
+ * the response status, the state machine is invoked to set transition
+ * with respect to NLP_EVT_CMPL_ADISC event.
+ **/
+static void
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	int  disc;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ADISC cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+
+	/* Since ndlp can be freed in the disc state machine, note if this node
+	 * is being used during discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
+	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	/* ADISC completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0104 ADISC completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		goto out;
+	}
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+			/* ELS command is being retried */
+			if (disc) {
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_set_disctmo(vport);
+			}
+			goto out;
+		}
+		/* ADISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (!lpfc_error_lost_link(irsp))
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_ADISC);
+	} else
+		/* Good status, call state machine */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_ADISC);
+
+	/* Check to see if there are more ADISCs to be sent */
+	if (disc && vport->num_disc_nodes)
+		lpfc_more_adisc(vport);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC ELS command.
+ *
+ * Return code
+ *   0 - successfully issued adisc
+ *   1 - failed to issue adisc
+ **/
+int
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	ADISC *ap;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+
+	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ADISC);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For ADISC request, remainder of payload is service parameters */
+	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in ADISC payload */
+	ap = (ADISC *) pcmd;
+	ap->hardAL_PA = phba->fc_pref_ALPA;
+	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ap->DID = be32_to_cpu(vport->fc_myDID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue ADISC:     did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitADISC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_ADISC_SND;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_ADISC_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_logo - Completion callback function for logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the ELS Logout (LOGO)
+ * command. If no error status was reported from the LOGO response, the
+ * state machine of the associated ndlp shall be invoked for transition with
+ * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
+ * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ **/
+static void
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp;
+	struct lpfc_sli *psli;
+	struct lpfcMboxq *mbox;
+
+	psli = &phba->sli;
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &(rspiocb->iocb);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_LOGO_SND;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"LOGO cmpl:       status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+	/* LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0105 LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, vport->num_disc_nodes);
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport))
+		goto out;
+
+	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+	        /* NLP_EVT_DEVICE_RM should unregister the RPI
+		 * which should abort all outstanding IOs.
+		 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_DEVICE_RM);
+		goto out;
+	}
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+			/* ELS command is being retried */
+			goto out;
+		/* LOGO failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+		if (lpfc_error_lost_link(irsp))
+			goto out;
+		else
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_LOGO);
+	} else
+		/* Good status, call state machine.
+		 * This will unregister the rpi if needed.
+		 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_LOGO);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+	if ((vport->fc_flag & FC_PT2PT) &&
+		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+		phba->pport->fc_myDID = 0;
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox) {
+			lpfc_config_link(phba, mbox);
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+				MBX_NOT_FINISHED) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+			}
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_issue_els_logo - Issue a logo to a node on a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return code
+ *   0 - successfully issued logo
+ *   1 - failed to issue logo
+ **/
+int
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		    uint8_t retry)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	if (ndlp->nlp_flag & NLP_LOGO_SND) {
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_LOGO);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in LOGO payload */
+	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue LOGO:      did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitLOGO++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_SND;
+	spin_unlock_irq(shost->host_lock);
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_LOGO_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
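+/*
+ * Illustrative caller sketch (not taken from the code above): an explicit
+ * logout path typically moves the node to NPR state once the LOGO has been
+ * successfully issued, as the delayed-retry handler below also does:
+ *
+ *	if (!lpfc_issue_els_logo(vport, ndlp, 0)) {
+ *		ndlp->nlp_prev_state = ndlp->nlp_state;
+ *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ *	}
+ *
+ * The retry count of 0 is only an example value.
+ */
+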
+/**
+ * lpfc_cmpl_els_cmd - Completion callback function for generic els command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for ELS commands.
+ * Specifically, it is the callback function which does not need to perform
+ * any command specific operations. It is currently used by the ELS command
+ * issuing routines for the ELS State Change  Request (SCR),
+ * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
+ * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
+ * certain debug loggings, this callback function simply invokes the
+ * lpfc_els_chk_latt() routine to check whether link went down during the
+ * discovery process.
+ **/
+static void
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+
+	irsp = &rspiocb->iocb;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
+			 irsp->ulpIoTag, irsp->ulpStatus,
+			 irsp->un.ulpWord[4], irsp->ulpTimeout);
+	/* Check to see if link went down during discovery */
+	lpfc_els_chk_latt(vport);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_issue_els_scr - Issue a scr to a node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a State Change Request (SCR) to a fabric node
+ * on a @vport. The remote node @nportid is passed into the function. It
+ * first searches the @vport node list to find the matching ndlp. If no such
+ * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
+ * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
+ * routine is invoked to send the SCR IOCB.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the SCR ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued scr command
+ *   1 - Failed to issue scr command
+ **/
+int
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	struct lpfc_nodelist *ndlp;
+
+	psli = &phba->sli;
+	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
+
+	ndlp = lpfc_findnode_did(vport, nportid);
+	if (!ndlp) {
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 1;
+		lpfc_nlp_init(vport, ndlp, nportid);
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_SCR);
+
+	if (!elsiocb) {
+		/* This will trigger the release of the node just
+		 * allocated
+		 */
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
+	pcmd += sizeof(uint32_t);
+
+	/* For SCR, remainder of payload is SCR parameter page */
+	memset(pcmd, 0, sizeof(SCR));
+	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue SCR:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitSCR++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		/* The additional lpfc_nlp_put will cause the following
+		 * lpfc_els_free_iocb routine to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* This will cause the callback-function lpfc_cmpl_els_cmd to
+	 * trigger the release of node.
+	 */
+	lpfc_nlp_put(ndlp);
+	return 0;
+}
+
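+/*
+ * Illustrative caller sketch: after fabric login, discovery code would
+ * typically register for fabric state change notifications by issuing an
+ * SCR to the Fabric Controller well-known address, e.g.
+ *
+ *	lpfc_issue_els_scr(vport, SCR_DID, 0);
+ *
+ * SCR_DID (the Fabric Controller N_Port ID) is assumed to be defined in the
+ * driver headers; the retry count of 0 is only an example value.
+ */
+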
+/**
+ * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fibre Channel Address Resolution Response
+ * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
+ * is passed into the function. It first searches the @vport node list to find
+ * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
+ * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
+ * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FARPR ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued farpr command
+ *   1 - Failed to issue farpr command
+ **/
+static int
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	FARP *fp;
+	uint8_t *pcmd;
+	uint32_t *lp;
+	uint16_t cmdsize;
+	struct lpfc_nodelist *ondlp;
+	struct lpfc_nodelist *ndlp;
+
+	psli = &phba->sli;
+	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
+
+	ndlp = lpfc_findnode_did(vport, nportid);
+	if (!ndlp) {
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 1;
+		lpfc_nlp_init(vport, ndlp, nportid);
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return 1;
+	}
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_RNID);
+	if (!elsiocb) {
+		/* This will trigger the release of the node just
+		 * allocated
+		 */
+		lpfc_nlp_put(ndlp);
+		return 1;
+	}
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in FARPR payload */
+	fp = (FARP *) (pcmd);
+	memset(fp, 0, sizeof(FARP));
+	lp = (uint32_t *) pcmd;
+	*lp++ = be32_to_cpu(nportid);
+	*lp++ = be32_to_cpu(vport->fc_myDID);
+	fp->Rflags = 0;
+	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
+
+	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ondlp = lpfc_findnode_did(vport, nportid);
+	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
+		memcpy(&fp->OportName, &ondlp->nlp_portname,
+		       sizeof(struct lpfc_name));
+		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
+		       sizeof(struct lpfc_name));
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FARPR:     did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	phba->fc_stat.elsXmitFARPR++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		/* The additional lpfc_nlp_put will cause the following
+		 * lpfc_els_free_iocb routine to trigger the release of
+		 * the node.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	/* This will cause the callback-function lpfc_cmpl_els_cmd to
+	 * trigger the release of the node.
+	 */
+	lpfc_nlp_put(ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nlp: pointer to a node-list data structure.
+ *
+ * This routine cancels the timer with a delayed IOCB-command retry for
+ * a @vport's @nlp. It stops the timer for the delayed function retry and
+ * removes the ELS retry event if one is pending. In addition, if the
+ * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
+ * commands are sent for the @vport's nodes that require issuing discovery
+ * ADISC.
+ **/
+void
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_work_evt *evtp;
+
+	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+		return;
+	spin_lock_irq(shost->host_lock);
+	nlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	del_timer_sync(&nlp->nlp_delayfunc);
+	nlp->nlp_last_elscmd = 0;
+	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
+		list_del_init(&nlp->els_retry_evt.evt_listp);
+		/* Decrement nlp reference count held for the delayed retry */
+		evtp = &nlp->els_retry_evt;
+		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
+	}
+	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		if (vport->num_disc_nodes) {
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* Check if there are more ADISCs to be sent */
+				lpfc_more_adisc(vport);
+			} else {
+				/* Check if there are more PLOGIs to be sent */
+				lpfc_more_plogi(vport);
+				if (vport->num_disc_nodes == 0) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_NDISC_ACTIVE;
+					spin_unlock_irq(shost->host_lock);
+					lpfc_can_disctmo(vport);
+					lpfc_end_rscn(vport);
+				}
+			}
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
+ * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ *
+ * This routine is invoked when the ndlp delayed-function timer fires. If an
+ * ELS retry event is already queued for the node, it simply returns.
+ * Otherwise, it posts an LPFC_EVT_ELS_RETRY event to the HBA work list and
+ * invokes the lpfc_worker_wake_up() routine to wake up the worker thread to
+ * process the event. Note that lpfc_nlp_get() is called before posting the
+ * event to the work list to hold a reference count on the ndlp, so that the
+ * reference to the ndlp is guaranteed to still be available when the worker
+ * thread gets to the event associated with the ndlp.
+ **/
+void
+lpfc_els_retry_delay(unsigned long ptr)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (!list_empty(&evtp->evt_listp)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	/* We need to hold the node by incrementing the reference
+	 * count until the queued work is done
+	 */
+	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_ELS_RETRY;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return;
+}
+
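+/*
+ * Illustrative sketch of how the delayed-retry timer above gets armed (the
+ * actual arming, including host_lock protection, is done in lpfc_els_retry()
+ * further below): the retrying code records the command, starts
+ * nlp_delayfunc, whose expiry handler is lpfc_els_retry_delay(), and sets
+ * NLP_DELAY_TMO:
+ *
+ *	ndlp->nlp_last_elscmd = cmd;
+ *	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay));
+ *	ndlp->nlp_flag |= NLP_DELAY_TMO;
+ *
+ * Here cmd and delay stand for the ELS command word and the delay in
+ * milliseconds chosen by the retry policy; locking is omitted for brevity.
+ */
+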
+/**
+ * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine is the worker-thread handler for processing the @ndlp delayed
+ * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
+ * the last ELS command from the associated ndlp and invokes the proper ELS
+ * function according to the delayed ELS command to retry the command.
+ **/
+void
+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	uint32_t cmd, did, retry;
+
+	spin_lock_irq(shost->host_lock);
+	did = ndlp->nlp_DID;
+	cmd = ndlp->nlp_last_elscmd;
+	ndlp->nlp_last_elscmd = 0;
+
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	/*
+	 * If a discovery event readded nlp_delayfunc after timer
+	 * firing and before processing the timer, cancel the
+	 * nlp_delayfunc.
+	 */
+	del_timer_sync(&ndlp->nlp_delayfunc);
+	retry = ndlp->nlp_retry;
+	ndlp->nlp_retry = 0;
+
+	switch (cmd) {
+	case ELS_CMD_FLOGI:
+		lpfc_issue_els_flogi(vport, ndlp, retry);
+		break;
+	case ELS_CMD_PLOGI:
+		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+		}
+		break;
+	case ELS_CMD_ADISC:
+		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+		}
+		break;
+	case ELS_CMD_PRLI:
+		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+		}
+		break;
+	case ELS_CMD_LOGO:
+		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		}
+		break;
+	case ELS_CMD_FDISC:
+		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+			lpfc_issue_els_fdisc(vport, ndlp, retry);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_els_retry - Make retry decision on an els command iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine makes a retry decision on an ELS command IOCB, which has
+ * failed. The following ELS IOCBs use this function for retrying the command
+ * when the previously issued command responded with an error status: FLOGI,
+ * PLOGI, PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * returned error status, it makes the decision whether a retry shall be
+ * issued for the command, and whether a retry shall be made immediately or
+ * delayed. In the former case, the corresponding ELS command issuing-function
+ * is called to retry the command. In the latter case, the ELS command shall
+ * be posted to the ndlp delayed event and the delayed function timer set on
+ * the ndlp for the delayed command issuing.
+ *
+ * Return code
+ *   0 - No retry of els command is made
+ *   1 - Immediate or delayed retry of els command is made
+ **/
+static int
+lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	       struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *elscmd;
+	struct ls_rjt stat;
+	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+	int logerr = 0;
+	uint32_t cmd = 0;
+	uint32_t did;
+
+
+	/* Note: context2 may be 0 for internal driver abort
+	 * of a delayed ELS command.
+	 */
+
+	if (pcmd && pcmd->virt) {
+		elscmd = (uint32_t *) (pcmd->virt);
+		cmd = *elscmd++;
+	}
+
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+		did = ndlp->nlp_DID;
+	else {
+		/* We should only hit this case for retrying PLOGI */
+		did = irsp->un.elsreq64.remoteID;
+		ndlp = lpfc_findnode_did(vport, did);
+		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		    && (cmd != ELS_CMD_PLOGI))
+			return 1;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
+		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+
+	switch (irsp->ulpStatus) {
+	case IOSTAT_FCP_RSP_ERROR:
+		break;
+	case IOSTAT_REMOTE_STOP:
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			/* This IO was aborted by the target, we don't
+			 * know the rxid and because we did not send the
+			 * ABTS we cannot generate an RRQ.
+			 */
+			lpfc_set_rrq_active(phba, ndlp,
+					 cmdiocb->sli4_xritag, 0, 0);
+		}
+		break;
+	case IOSTAT_LOCAL_REJECT:
+		switch ((irsp->un.ulpWord[4] & 0xff)) {
+		case IOERR_LOOP_OPEN_FAILURE:
+			if (cmd == ELS_CMD_FLOGI) {
+				if (PCI_DEVICE_ID_HORNET ==
+					phba->pcidev->device) {
+					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
+					phba->pport->fc_myDID = 0;
+					phba->alpa_map[0] = 0;
+					phba->alpa_map[1] = 0;
+				}
+			}
+			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+				delay = 1000;
+			retry = 1;
+			break;
+
+		case IOERR_ILLEGAL_COMMAND:
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0124 Retry illegal cmd x%x "
+					 "retry:x%x delay:x%x\n",
+					 cmd, cmdiocb->retry, delay);
+			retry = 1;
+			/* All command's retry policy */
+			maxretry = 8;
+			if (cmdiocb->retry > 2)
+				delay = 1000;
+			break;
+
+		case IOERR_NO_RESOURCES:
+			logerr = 1; /* HBA out of resources */
+			retry = 1;
+			if (cmdiocb->retry > 100)
+				delay = 100;
+			maxretry = 250;
+			break;
+
+		case IOERR_ILLEGAL_FRAME:
+			delay = 100;
+			retry = 1;
+			break;
+
+		case IOERR_SEQUENCE_TIMEOUT:
+		case IOERR_INVALID_RPI:
+			retry = 1;
+			break;
+		}
+		break;
+
+	case IOSTAT_NPORT_RJT:
+	case IOSTAT_FABRIC_RJT:
+		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+			retry = 1;
+			break;
+		}
+		break;
+
+	case IOSTAT_NPORT_BSY:
+	case IOSTAT_FABRIC_BSY:
+		logerr = 1; /* Fabric / Remote NPort out of resources */
+		retry = 1;
+		break;
+
+	case IOSTAT_LS_RJT:
+		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+		/* Added for Vendor specific support
+		 * Just keep retrying for these Rsn / Exp codes
+		 */
+		switch (stat.un.b.lsRjtRsnCode) {
+		case LSRJT_UNABLE_TPC:
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_CMD_IN_PROGRESS) {
+				if (cmd == ELS_CMD_PLOGI) {
+					delay = 1000;
+					maxretry = 48;
+				}
+				retry = 1;
+				break;
+			}
+			if (stat.un.b.lsRjtRsnCodeExp ==
+			    LSEXP_CANT_GIVE_DATA) {
+				if (cmd == ELS_CMD_PLOGI) {
+					delay = 1000;
+					maxretry = 48;
+				}
+				retry = 1;
+				break;
+			}
+			if (cmd == ELS_CMD_PLOGI) {
+				delay = 1000;
+				maxretry = lpfc_max_els_tries + 1;
+				retry = 1;
+				break;
+			}
+			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+			  (cmd == ELS_CMD_FDISC) &&
+			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0125 FDISC Failed (x%x). "
+						 "Fabric out of resources\n",
+						 stat.un.lsRjtError);
+				lpfc_vport_set_state(vport,
+						     FC_VPORT_NO_FABRIC_RSCS);
+			}
+			break;
+
+		case LSRJT_LOGICAL_BSY:
+			if ((cmd == ELS_CMD_PLOGI) ||
+			    (cmd == ELS_CMD_PRLI)) {
+				delay = 1000;
+				maxretry = 48;
+			} else if (cmd == ELS_CMD_FDISC) {
+				/* FDISC retry policy */
+				maxretry = 48;
+				if (cmdiocb->retry >= 32)
+					delay = 1000;
+			}
+			retry = 1;
+			break;
+
+		case LSRJT_LOGICAL_ERR:
+			/* There are some cases where switches return this
+			 * error when they are not ready and should be returning
+			 * Logical Busy. We should delay every time.
+			 */
+			if (cmd == ELS_CMD_FDISC &&
+			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
+				maxretry = 3;
+				delay = 1000;
+				retry = 1;
+				break;
+			}
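+			/* No break here: control falls through to the
+			 * LSRJT_PROTOCOL_ERR handling below unless the FDISC
+			 * case above already broke out of the switch.
+			 */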
+		case LSRJT_PROTOCOL_ERR:
+			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+			  (cmd == ELS_CMD_FDISC) &&
+			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
+			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
+			  ) {
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+						 "0122 FDISC Failed (x%x). "
+						 "Fabric Detected Bad WWN\n",
+						 stat.un.lsRjtError);
+				lpfc_vport_set_state(vport,
+						     FC_VPORT_FABRIC_REJ_WWN);
+			}
+			break;
+		}
+		break;
+
+	case IOSTAT_INTERMED_RSP:
+	case IOSTAT_BA_RJT:
+		break;
+
+	default:
+		break;
+	}
+
+	if (did == FDMI_DID)
+		retry = 1;
+
+	if ((cmd == ELS_CMD_FLOGI) &&
+	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
+	    !lpfc_error_lost_link(irsp)) {
+		/* FLOGI retry policy */
+		retry = 1;
+		/* retry FLOGI forever */
+		maxretry = 0;
+		if (cmdiocb->retry >= 100)
+			delay = 5000;
+		else if (cmdiocb->retry >= 32)
+			delay = 1000;
+	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+		/* retry FDISCs every second up to devloss */
+		retry = 1;
+		maxretry = vport->cfg_devloss_tmo;
+		delay = 1000;
+	}
+
+	cmdiocb->retry++;
+	if (maxretry && (cmdiocb->retry >= maxretry)) {
+		phba->fc_stat.elsRetryExceeded++;
+		retry = 0;
+	}
+
+	if ((vport->load_flag & FC_UNLOADING) != 0)
+		retry = 0;
+
+	if (retry) {
+		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
+			/* Stop retrying PLOGI and FDISC if in FCF discovery */
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+						 "2849 Stop retry ELS command "
+						 "x%x to remote NPORT x%x, "
+						 "Data: x%x x%x\n", cmd, did,
+						 cmdiocb->retry, delay);
+				return 0;
+			}
+		}
+
+		/* Retry ELS command <elsCmd> to remote NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "0107 Retry ELS command x%x to remote "
+				 "NPORT x%x Data: x%x x%x\n",
+				 cmd, did, cmdiocb->retry, delay);
+
+		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
+			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+			((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+			/* Don't reset timer for no resources */
+
+			/* If discovery / RSCN timer is running, reset it */
+			if (timer_pending(&vport->fc_disctmo) ||
+			    (vport->fc_flag & FC_RSCN_MODE))
+				lpfc_set_disctmo(vport);
+		}
+
+		phba->fc_stat.elsXmitRetry++;
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
+			phba->fc_stat.elsDelayRetry++;
+			ndlp->nlp_retry = cmdiocb->retry;
+
+			/* delay is specified in milliseconds */
+			mod_timer(&ndlp->nlp_delayfunc,
+				jiffies + msecs_to_jiffies(delay));
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			if (cmd == ELS_CMD_PRLI)
+				lpfc_nlp_set_state(vport, ndlp,
+					NLP_STE_REG_LOGIN_ISSUE);
+			else
+				lpfc_nlp_set_state(vport, ndlp,
+					NLP_STE_NPR_NODE);
+			ndlp->nlp_last_elscmd = cmd;
+
+			return 1;
+		}
+		switch (cmd) {
+		case ELS_CMD_FLOGI:
+			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_FDISC:
+			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_PLOGI:
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PLOGI_ISSUE);
+			}
+			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_ADISC:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_PRLI:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
+			return 1;
+		case ELS_CMD_LOGO:
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
+			return 1;
+		}
+	}
+	/* No retry ELS command <elsCmd> to remote NPORT <did> */
+	if (logerr) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0137 No retry ELS command x%x to remote "
+			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
+			 cmd, did, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
+	}
+	else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0108 No retry ELS command x%x to remote "
+			 "NPORT x%x Retried:%d Error:x%x/%x\n",
+			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
+			 irsp->un.ulpWord[4]);
+	}
+	return 0;
+}
+
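+/*
+ * Illustrative use of lpfc_els_retry() from a completion handler, following
+ * essentially the same pattern as lpfc_cmpl_els_logo() above (the lost-link
+ * check is omitted here): on an error status, let the retry logic decide
+ * first and only drive the state machine when no retry was scheduled:
+ *
+ *	if (irsp->ulpStatus) {
+ *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ *			goto out;
+ *		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ *					NLP_EVT_CMPL_LOGO);
+ *	}
+ */
+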
+/**
+ * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
+ *
+ * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
+ * associated with a command IOCB back to the lpfc DMA buffer pool. It first
+ * checks to see whether there is a lpfc DMA buffer associated with the
+ * response of the command IOCB. If so, it will be released before releasing
+ * the lpfc DMA buffer associated with the IOCB itself.
+ *
+ * Return code
+ *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
+{
+	struct lpfc_dmabuf *buf_ptr;
+
+	/* Free the response before processing the command. */
+	if (!list_empty(&buf_ptr1->list)) {
+		list_remove_head(&buf_ptr1->list, buf_ptr,
+				 struct lpfc_dmabuf,
+				 list);
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+	}
+	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
+	kfree(buf_ptr1);
+	return 0;
+}
+
+/**
+ * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr: pointer to the lpfc dma buffer data structure.
+ *
+ * This routine releases the lpfc Direct Memory Access (DMA) buffer
+ * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
+ * pool.
+ *
+ * Return code
+ *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
+static int
+lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
+{
+	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+	kfree(buf_ptr);
+	return 0;
+}
+
+/**
+ * lpfc_els_free_iocb - Free a command iocb and its associated resources
+ * @phba: pointer to lpfc hba data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine frees a command IOCB and its associated resources. The
+ * command IOCB data structure contains the reference to various associated
+ * resources; these fields must be set to NULL if the associated reference
+ * is not present:
+ *   context1 - reference to ndlp
+ *   context2 - reference to cmd
+ *   context2->next - reference to rsp
+ *   context3 - reference to bpl
+ *
+ * It first properly decrements the reference count held on ndlp for the
+ * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
+ * set, it invokes the lpfc_els_free_data() routine to release the Direct
+ * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
+ * adds the DMA buffer to the @phba data structure for the delayed release.
+ * If reference to the Buffer Pointer List (BPL) is present, the
+ * lpfc_els_free_bpl() routine is invoked to release the DMA memory
+ * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
+ * invoked to release the IOCB data structure back to @phba IOCBQ list.
+ *
+ * Return code
+ *   0 - Success (currently, always return 0)
+ **/
+int
+lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
+{
+	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
+	if (ndlp) {
+		if (ndlp->nlp_flag & NLP_DEFER_RM) {
+			lpfc_nlp_put(ndlp);
+
+			/* If the ndlp is not being used by another discovery
+			 * thread, free it.
+			 */
+			if (!lpfc_nlp_not_used(ndlp)) {
+				/* If ndlp is being used by another discovery
+				 * thread, just clear NLP_DEFER_RM
+				 */
+				ndlp->nlp_flag &= ~NLP_DEFER_RM;
+			}
+		}
+		else
+			lpfc_nlp_put(ndlp);
+		elsiocb->context1 = NULL;
+	}
+	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
+	if (elsiocb->context2) {
+		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+			/* Firmware could still be in the process of DMAing
+			 * the payload, so don't free the data buffer until
+			 * after a heartbeat.
+			 */
+			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
+			buf_ptr = elsiocb->context2;
+			elsiocb->context2 = NULL;
+			if (buf_ptr) {
+				buf_ptr1 = NULL;
+				spin_lock_irq(&phba->hbalock);
+				if (!list_empty(&buf_ptr->list)) {
+					list_remove_head(&buf_ptr->list,
+						buf_ptr1, struct lpfc_dmabuf,
+						list);
+					INIT_LIST_HEAD(&buf_ptr1->list);
+					list_add_tail(&buf_ptr1->list,
+						&phba->elsbuf);
+					phba->elsbuf_cnt++;
+				}
+				INIT_LIST_HEAD(&buf_ptr->list);
+				list_add_tail(&buf_ptr->list, &phba->elsbuf);
+				phba->elsbuf_cnt++;
+				spin_unlock_irq(&phba->hbalock);
+			}
+		} else {
+			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+			lpfc_els_free_data(phba, buf_ptr1);
+		}
+	}
+
+	if (elsiocb->context3) {
+		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+		lpfc_els_free_bpl(phba, buf_ptr);
+	}
+	lpfc_sli_release_iocbq(phba, elsiocb);
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the Logout (LOGO)
+ * Accept (ACC) Response ELS command. This routine is invoked to indicate
+ * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+ * release the ndlp if it has the last reference remaining (reference count
+ * is 1). If it succeeds (meaning ndlp released), it sets the IOCB context1
+ * field to NULL to inform the following lpfc_els_free_iocb() routine no
+ * ndlp reference count needs to be decremented. Otherwise, the ndlp
+ * reference use-count shall be decremented by the lpfc_els_free_iocb()
+ * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+ * IOCB data structure.
+ **/
+static void
+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		       struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+	/* ACC to LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0109 ACC to LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+
+	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
+		/* NPort Recovery mode or node is just allocated */
+		if (!lpfc_nlp_not_used(ndlp)) {
+			/* If the ndlp is being used by another discovery
+			 * thread, just unregister the RPI.
+			 */
+			lpfc_unreg_rpi(vport, ndlp);
+		} else {
+			/* Indicate the node has already been released and
+			 * should not be referenced from within
+			 * lpfc_els_free_iocb.
+			 */
+			cmdiocb->context1 = NULL;
+		}
+	}
+
+	/*
+	 * The driver received a LOGO from the rport and has ACK'd it.
+	 * At this point, the driver is done so release the IOCB
+	 */
+	lpfc_els_free_iocb(phba, cmdiocb);
+
+	/*
+	 * Remove the ndlp reference if it's a fabric node that has
+	 * sent us an unsolicited LOGO.
+	 */
+	if (ndlp->nlp_type & NLP_FABRIC)
+		lpfc_nlp_put(ndlp);
+
+	return;
+}
+
+/**
+ * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for unregister default
+ * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+ * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+ * decrements the ndlp reference count held for this completion callback
+ * function. After that, it invokes the lpfc_nlp_not_used() to check
+ * whether there is only one reference left on the ndlp. If so, it will
+ * perform one more decrement and trigger the release of the ndlp.
+ **/
+void
+lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_nlp_put(ndlp);
+		/* This is the end of the default RPI cleanup logic for this
+		 * ndlp. If no other discovery threads are using this ndlp,
+		 * we should free all resources associated with it.
+		 */
+		lpfc_nlp_not_used(ndlp);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for ELS Response IOCB
+ * command. In the normal case, this callback function just properly sets
+ * the nlp_flag bitmap in the ndlp data structure; if the mbox command
+ * reference field in the command IOCB is not NULL, the referred mailbox
+ * command will be sent out, and then the lpfc_els_free_iocb() routine is
+ * invoked to release the IOCB. Under error conditions, such as when a
+ * LS_RJT is returned or a link down event occurred during the discovery,
+ * the lpfc_nlp_not_used() routine shall be invoked trying to release the
+ * ndlp if no other threads are currently referring to it.
+ **/
+static void
+lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
+	IOCB_t  *irsp;
+	uint8_t *pcmd;
+	LPFC_MBOXQ_t *mbox = NULL;
+	struct lpfc_dmabuf *mp = NULL;
+	uint32_t ls_rjt = 0;
+
+	irsp = &rspiocb->iocb;
+
+	if (cmdiocb->context_un.mbox)
+		mbox = cmdiocb->context_un.mbox;
+
+	/* First determine if this is a LS_RJT cmpl. Note, this callback
+	 * function can have cmdiocb->context1 (ndlp) field set to NULL.
+	 */
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
+		/* A LS_RJT associated with Default RPI cleanup has its own
+		 * separate code path.
+		 */
+		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+			ls_rjt = 1;
+	}
+
+	/* Check to see if link went down during discovery */
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
+		if (mbox) {
+			mp = (struct lpfc_dmabuf *) mbox->context1;
+			if (mp) {
+				lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			mempool_free(mbox, phba->mbox_mem_pool);
+		}
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
+			if (lpfc_nlp_not_used(ndlp)) {
+				ndlp = NULL;
+				/* Indicate the node has already been released
+				 * and should not be referenced from within
+				 * the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+			}
+		goto out;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		cmdiocb->iocb.un.elsreq64.remoteID);
+	/* ELS response tag <ulpIoTag> completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0110 ELS response tag x%x completes "
+			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
+			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	if (mbox) {
+		if ((rspiocb->iocb.ulpStatus == 0)
+		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+			lpfc_unreg_rpi(vport, ndlp);
+			/* Increment reference count to ndlp to hold the
+			 * reference to ndlp for the callback function.
+			 */
+			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->vport = vport;
+			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
+				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			}
+			else {
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_REG_LOGIN_ISSUE);
+			}
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			    != MBX_NOT_FINISHED)
+				goto out;
+			else
+				/* Decrement the ndlp reference count we
+				 * set for this failed mailbox command.
+				 */
+				lpfc_nlp_put(ndlp);
+
+			/* ELS rsp: Cannot issue reg_login for <NPortid> */
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0138 ELS rsp: Cannot issue reg_login for x%x "
+				"Data: x%x x%x x%x\n",
+				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+				ndlp->nlp_rpi);
+
+			if (lpfc_nlp_not_used(ndlp)) {
+				ndlp = NULL;
+				/* Indicate node has already been released,
+				 * and should not be referenced from within
+				 * the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+			}
+		} else {
+			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
+			if (!lpfc_error_lost_link(irsp) &&
+			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+				if (lpfc_nlp_not_used(ndlp)) {
+					ndlp = NULL;
+					/* Indicate node has already been
+					 * released and should not be
+					 * referenced from within the routine
+					 * lpfc_els_free_iocb.
+					 */
+					cmdiocb->context1 = NULL;
+				}
+			}
+		}
+		mp = (struct lpfc_dmabuf *) mbox->context1;
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+out:
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+		spin_unlock_irq(shost->host_lock);
+
+		/* If the node is not being used by another discovery thread,
+		 * and we are sending a reject, we are done with it.
+		 * Release driver reference count here and free associated
+		 * resources.
+		 */
+		if (ls_rjt)
+			if (lpfc_nlp_not_used(ndlp))
+				/* Indicate node has already been released,
+				 * and should not be referenced from within
+				 * the routine lpfc_els_free_iocb.
+				 */
+				cmdiocb->context1 = NULL;
+	}
+
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @flag: the els command code to be accepted.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues an Accept (ACC) response IOCB
+ * command. It uses the @flag to properly set up the IOCB field for the
+ * specific ACC response command to be issued and invokes the
+ * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
+ * @mbox pointer is passed in, it will be put into the context_un.mbox
+ * field of the IOCB for the completion callback function to issue the
+ * mailbox command to the HBA later when callback is invoked.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the corresponding response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc response
+ *   1 - Failed to issue acc response
+ **/
+int
+lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+		 LPFC_MBOXQ_t *mbox)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+	ELS_PKT *els_pkt_ptr;
+
+	psli = &phba->sli;
+	oldcmd = &oldiocb->iocb;
+
+	switch (flag) {
+	case ELS_CMD_ACC:
+		cmdsize = sizeof(uint32_t);
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+		if (!elsiocb) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+			spin_unlock_irq(shost->host_lock);
+			return 1;
+		}
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+		pcmd += sizeof(uint32_t);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC:       did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	case ELS_CMD_PLOGI:
+		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+		if (!elsiocb)
+			return 1;
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+		if (mbox)
+			elsiocb->context_un.mbox = mbox;
+
+		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+		pcmd += sizeof(uint32_t);
+		memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC PLOGI: did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	case ELS_CMD_PRLO:
+		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
+		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
+		if (!elsiocb)
+			return 1;
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+		       sizeof(uint32_t) + sizeof(PRLO));
+		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
+		els_pkt_ptr = (ELS_PKT *) pcmd;
+		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+			"Issue ACC PRLO:  did:x%x flg:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag, 0);
+		break;
+	default:
+		return 1;
+	}
+	/* Xmit ELS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
+			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+		spin_unlock_irq(shost->host_lock);
+		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+	} else {
+		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	}
+
+	phba->fc_stat.elsXmitACC++;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
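+/*
+ * Illustrative caller sketch: an unsolicited ELS receive path would
+ * typically answer with a plain ACC by passing the received iocb and the
+ * originating node, e.g.
+ *
+ *	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ *
+ * where cmdiocb stands for the received command iocb; passing ELS_CMD_PLOGI
+ * as the flag instead makes the routine append the service parameters, as
+ * the switch statement above shows.
+ */
+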
+/**
+ * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @rejectError: reject status word (LS_RJT reason and explanation codes).
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues a Reject (RJT) response IOCB
+ * command. If a @mbox pointer is passed in, it will be put into the
+ * context_un.mbox field of the IOCB for the completion callback function
+ * to issue to the HBA later.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the reject response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued reject response
+ *   1 - Failed to issue reject response
+ **/
+int
+lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+		    LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+	cmdsize = 2 * sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
+	pcmd += sizeof(uint32_t);
+	*((uint32_t *) (pcmd)) = rejectError;
+
+	if (mbox)
+		elsiocb->context_un.mbox = mbox;
+
+	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0129 Xmit ELS RJT x%x response tag x%x "
+			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			 "rpi x%x\n",
+			 rejectError, elsiocb->iotag,
+			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+
+	phba->fc_stat.elsXmitLSRJT++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
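+/*
+ * Illustrative caller sketch: a receive path that cannot honor a request
+ * would typically build the reject status word with struct ls_rjt and pass
+ * it in as @rejectError, e.g.
+ *
+ *	struct ls_rjt stat;
+ *
+ *	memset(&stat, 0, sizeof(struct ls_rjt));
+ *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ *
+ * struct ls_rjt, lsRjtRsnCode, lsRjtError and LSRJT_UNABLE_TPC appear in the
+ * retry logic above; cmdiocb here stands for the received command iocb.
+ */
+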
+/**
+ * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Address
+ * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC Accept response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc adisc response
+ *   1 - Failed to issue adisc acc response
+ **/
+int
+lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+		       struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	ADISC *ap;
+	IOCB_t *icmd, *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit ADISC ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0130 Xmit ADISC ACC response iotag x%x xri: "
+			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+
+	ap = (ADISC *) (pcmd);
+	ap->hardAL_PA = phba->fc_pref_ALPA;
+	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ap->DID = be32_to_cpu(vport->fc_myDID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC ADISC: did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Process
+ * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI Accept response ELS IOCB command.
+ *
+ * Return code
+ *   0 - Successfully issued acc prli response
+ *   1 - Failed to issue acc prli response
+ **/
+int
+lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+		      struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	PRLI *npr;
+	lpfc_vpd_t *vpd;
+	IOCB_t *icmd;
+	IOCB_t *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+
+	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit PRLI ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+	pcmd += sizeof(uint32_t);
+
+	/* For PRLI, remainder of payload is PRLI parameter page */
+	memset(pcmd, 0, sizeof(PRLI));
+
+	npr = (PRLI *) pcmd;
+	vpd = &phba->vpd;
+	/*
+	 * If the remote port is a target and our firmware version is 3.20 or
+	 * later, set the following bits for FC-TAPE support.
+	 */
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vpd->rev.feaLevelHigh >= 0x02)) {
+		npr->ConfmComplAllowed = 1;
+		npr->Retry = 1;
+		npr->TaskRetryIdReq = 1;
+	}
+
+	npr->acceptRspCode = PRLI_REQ_EXECUTED;
+	npr->estabImagePair = 1;
+	npr->readXferRdyDis = 1;
+	npr->ConfmComplAllowed = 1;
+
+	npr->prliType = PRLI_FCP_TYPE;
+	npr->initiatorFunc = 1;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC PRLI:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @format: rnid command format.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a Request Node Identification Data (RNID) Accept
+ * (ACC) response. It constructs the RNID ACC response command according to
+ * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
+ * issue the response.
+ *
+ * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
+ * ndlp will be incremented by 1 for holding the ndlp and the reference to
+ * ndlp will be stored into the context1 field of the IOCB for the
+ * completion callback function to the RNID Accept Response ELS IOCB
+ * command.
+ *
+ * Return code
+ *   0 - Successfully issued acc rnid response
+ *   1 - Failed to issue acc rnid response
+ **/
+static int
+lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	RNID *rn;
+	IOCB_t *icmd, *oldcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+					+ (2 * sizeof(struct lpfc_name));
+	if (format)
+		cmdsize += sizeof(RNID_TOP_DISC);
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	/* Xmit RNID ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+
+	memset(pcmd, 0, sizeof(RNID));
+	rn = (RNID *) (pcmd);
+	rn->Format = format;
+	rn->CommonLen = (2 * sizeof(struct lpfc_name));
+	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
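+	/* Format 0 returns only the common WWPN/WWNN identification data;
+	 * the topology discovery format appends an RNID_TOP_DISC page.
+	 */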
+	switch (format) {
+	case 0:
+		rn->SpecificLen = 0;
+		break;
+	case RNID_TOPOLOGY_DISC:
+		rn->SpecificLen = sizeof(RNID_TOP_DISC);
+		memcpy(&rn->un.topologyDisc.portName,
+		       &vport->fc_portname, sizeof(struct lpfc_name));
+		rn->un.topologyDisc.unitType = RNID_HBA;
+		rn->un.topologyDisc.physPort = 0;
+		rn->un.topologyDisc.attachedNodes = 0;
+		break;
+	default:
+		rn->CommonLen = 0;
+		rn->SpecificLen = 0;
+		break;
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC RNID:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_clear_rrq - Clear the rrq that the received RRQ ELS describes
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine extracts the exchange identifiers from the received RRQ
+ * payload, looks up the corresponding active RRQ on the @vport, and
+ * clears it.
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+      struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	uint8_t *pcmd;
+	struct RRQ *rrq;
+	uint16_t rxid;
+	uint16_t xri;
+	struct lpfc_node_rrq *prrq;
+
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+	pcmd += sizeof(uint32_t);
+	rrq = (struct RRQ *)pcmd;
+	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+	rxid = bf_get(rrq_rxid, rrq);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+			" x%x x%x\n",
+			be32_to_cpu(bf_get(rrq_did, rrq)),
+			bf_get(rrq_oxid, rrq),
+			rxid,
+			iocb->iotag, iocb->iocb.ulpContext);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
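+	/* If our DID matches the originator S_ID in the RRQ, the exchange is
+	 * tracked by its OX_ID; otherwise it is tracked by the RX_ID.
+	 */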
+	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+		xri = bf_get(rrq_oxid, rrq);
+	else
+		xri = rxid;
+	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
+	if (prrq)
+		lpfc_clr_rrq_active(phba, xri, prrq);
+	return;
+}
+
+/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+	/* The accumulated length can exceed the BPL_SIZE.  For
+	 * now, use this as the limit
+	 */
+	if (cmdsize > LPFC_BPL_SIZE)
+		cmdsize = LPFC_BPL_SIZE;
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
+	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+
+	/* Xmit ECHO ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC ECHO:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Address Discover (ADISC) ELS commands to those
+ * N_Ports of the @vport that are in node port recovery state and for which
+ * an ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued
+ * by invoking the lpfc_issue_els_adisc() routine, the per-@vport discover
+ * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
+ * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
+ * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
+ * deferred for a later pass. If, after walking all the ndlps on the @vport,
+ * no ADISC IOCB was issued, the FC_NLP_MORE bit is cleared to indicate that
+ * no more ADISCs need to be sent.
+ *
+ * Return code
+ *    The number of N_Ports with adisc issued.
+ **/
+int
+lpfc_els_disc_adisc(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int sentadisc = 0;
+
+	/* go thru NPR nodes and issue any remaining ELS ADISCs */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+			sentadisc++;
+			vport->num_disc_nodes++;
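+			/* Throttle discovery: once the configured number of
+			 * outstanding discovery nodes is reached, mark
+			 * FC_NLP_MORE so the remaining ADISCs are sent later.
+			 */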
+			if (vport->num_disc_nodes >=
+			    vport->cfg_discovery_threads) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_NLP_MORE;
+				spin_unlock_irq(shost->host_lock);
+				break;
+			}
+		}
+	}
+	if (sentadisc == 0) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NLP_MORE;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return sentadisc;
+}
+
+/**
+ * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports of
+ * a @vport that are in node port recovery state. Each time an ELS PLOGI
+ * IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
+ * per-@vport discover count (num_disc_nodes) is incremented. If
+ * num_disc_nodes reaches the pre-configured threshold
+ * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport fc_flag
+ * and issuing of the remaining PLOGI IOCBs is deferred for a later pass. If,
+ * after walking all the ndlps on the @vport, no PLOGI IOCB was issued, the
+ * FC_NLP_MORE bit is cleared to indicate that no more PLOGIs need to be
+ * sent.
+ *
+ * Return code
+ *   The number of N_Ports with plogi issued.
+ **/
+int
+lpfc_els_disc_plogi(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int sentplogi = 0;
+
+	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
+		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
+		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			sentplogi++;
+			vport->num_disc_nodes++;
+			if (vport->num_disc_nodes >=
+			    vport->cfg_discovery_threads) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag |= FC_NLP_MORE;
+				spin_unlock_irq(shost->host_lock);
+				break;
+			}
+		}
+	}
+	if (sentplogi) {
+		lpfc_set_disctmo(vport);
+	}
+	else {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NLP_MORE;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return sentplogi;
+}
+
+/**
+ * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine cleans up any Registration State Change Notification
+ * (RSCN) activity on a @vport. Note that the fc_rscn_flush flag of the
+ * @vport, together with the host_lock, is used to prevent multiple threads
+ * from accessing the RSCN array of the same @vport at the same time.
+ **/
+void
+lpfc_els_flush_rscn(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	int i;
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		spin_unlock_irq(shost->host_lock);
+		return;
+	}
+	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+
+	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
+		vport->fc_rscn_id_list[i] = NULL;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_rscn_id_cnt = 0;
+	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_can_disctmo(vport);
+	/* Indicate we are done walking this fc_rscn_id_list */
+	vport->fc_rscn_flush = 0;
+}
+
+/**
+ * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: remote destination port identifier.
+ *
+ * This routine checks whether there is any pending Registration State
+ * Change Notification (RSCN) to a @did on @vport.
+ *
+ * Return code
+ *   Non-zero - The @did matched a pending rscn
+ *   0 - Not able to match @did with a pending rscn
+ **/
+int
+lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
+{
+	D_ID ns_did;
+	D_ID rscn_did;
+	uint32_t *lp;
+	uint32_t payload_len, i;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ns_did.un.word = did;
+
+	/* Never match fabric nodes for RSCNs */
+	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+		return 0;
+
+	/* If we are doing a FULL RSCN rediscovery, match everything */
+	if (vport->fc_flag & FC_RSCN_DISCOVERY)
+		return did;
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	/* Indicate we are walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+		lp = vport->fc_rscn_id_list[i]->virt;
+		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+		payload_len -= sizeof(uint32_t);	/* take off word 0 */
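+		/* Each remaining word is one affected address entry; its
+		 * format bits select port, area, domain, or fabric scope.
+		 */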
+		while (payload_len) {
+			rscn_did.un.word = be32_to_cpu(*lp++);
+			payload_len -= sizeof(uint32_t);
+			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+			case RSCN_ADDRESS_FORMAT_PORT:
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area)
+				    && (ns_did.un.b.id == rscn_did.un.b.id))
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_AREA:
+				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+				    && (ns_did.un.b.area == rscn_did.un.b.area))
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_DOMAIN:
+				if (ns_did.un.b.domain == rscn_did.un.b.domain)
+					goto return_did_out;
+				break;
+			case RSCN_ADDRESS_FORMAT_FABRIC:
+				goto return_did_out;
+			}
+		}
+	}
+	/* Indicate we are done with walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	return 0;
+return_did_out:
+	/* Indicate we are done with walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	return did;
+}
+
+/**
+ * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
+ * state machine for a @vport's nodes that are with pending RSCN (Registration
+ * State Change Notification).
+ *
+ * Return code
+ *   0 - Successful (currently always returns 0)
+ **/
+static int
+lpfc_rscn_recovery_check(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+
+	/* Move all affected nodes by pending RSCNs to NPR state. */
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp) ||
+		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
+			continue;
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_send_rscn_event - Send an RSCN event to management application
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ *
+ * lpfc_send_rscn_event sends an RSCN netlink event to management
+ * applications.
+ */
+static void
+lpfc_send_rscn_event(struct lpfc_vport *vport,
+		struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_dmabuf *pcmd;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	uint32_t *payload_ptr;
+	uint32_t payload_len;
+	struct lpfc_rscn_event_header *rscn_event_data;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	payload_ptr = (uint32_t *) pcmd->virt;
+	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
+
+	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
+		payload_len, GFP_KERNEL);
+	if (!rscn_event_data) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0147 Failed to allocate memory for RSCN event\n");
+		return;
+	}
+	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
+	rscn_event_data->payload_length = payload_len;
+	memcpy(rscn_event_data->rscn_payload, payload_ptr,
+		payload_len);
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		sizeof(struct lpfc_els_event_header) + payload_len,
+		(char *)rscn_event_data,
+		LPFC_NL_VENDOR_ID);
+
+	kfree(rscn_event_data);
+}
+
+/**
+ * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RSCN (Registration State Change
+ * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
+ * and the fc_host_post_event() routine is invoked to notify the FC transport
+ * layer. If the discovery state machine is about to begin discovery, the
+ * RSCN is simply accepted and the discovery process will satisfy it. If this
+ * RSCN only contains N_Port IDs for other vports on this HBA, it is accepted
+ * and otherwise ignored. If the state machine is in the recovery state, the
+ * fc_rscn_id_list of this @vport is walked and the
+ * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
+ * all nodes that match the RSCN payload. Otherwise, the
+ * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
+ *
+ * Return code
+ *   0 - Just sent the acc response
+ *   1 - Sent the acc response and waited for name server completion
+ **/
+static int
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp, *datap;
+	IOCB_t *icmd;
+	uint32_t payload_len, length, nportid, *cmd;
+	int rscn_cnt;
+	int rscn_id = 0, hba_id = 0;
+	int i;
+
+	icmd = &cmdiocb->iocb;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+	payload_len -= sizeof(uint32_t);	/* take off word 0 */
+	/* RSCN received */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
+			 vport->fc_flag, payload_len, *lp,
+			 vport->fc_rscn_id_cnt);
+
+	/* Send an RSCN event to the management application */
+	lpfc_send_rscn_event(vport, cmdiocb);
+
+	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
+		fc_host_post_event(shost, fc_get_event_number(),
+			FCH_EVT_RSCN, lp[i]);
+
+	/* If we are about to begin discovery, just ACC the RSCN.
+	 * Discovery processing will satisfy it.
+	 */
+	if (vport->port_state <= LPFC_NS_QRY) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		return 0;
+	}
+
+	/* If this RSCN just contains NPortIDs for other vports on this HBA,
+	 * just ACC and ignore it.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+		!(vport->cfg_peer_port_login)) {
+		i = payload_len;
+		datap = lp;
+		while (i > 0) {
+			nportid = *datap++;
+			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+			i -= sizeof(uint32_t);
+			rscn_id++;
+			if (lpfc_find_vport_by_did(phba, nportid))
+				hba_id++;
+		}
+		if (rscn_id == hba_id) {
+			/* ALL NPortIDs in RSCN are on HBA */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0219 Ignore RSCN "
+					 "Data: x%x x%x x%x x%x\n",
+					 vport->fc_flag, payload_len,
+					 *lp, vport->fc_rscn_id_cnt);
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
+				ndlp->nlp_DID, vport->port_state,
+				ndlp->nlp_flag);
+
+			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+				ndlp, NULL);
+			return 0;
+		}
+	}
+
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_rscn_flush) {
+		/* Another thread is walking fc_rscn_id_list on this vport */
+		vport->fc_flag |= FC_RSCN_DISCOVERY;
+		spin_unlock_irq(shost->host_lock);
+		/* Send back ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		return 0;
+	}
+	/* Indicate we are walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 1;
+	spin_unlock_irq(shost->host_lock);
+	/* Get the array count after successfully have the token */
+	rscn_cnt = vport->fc_rscn_id_cnt;
+	/* If we are already processing an RSCN, save the received
+	 * RSCN payload buffer, cmdiocb->context2 to process later.
+	 */
+	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_RSCN_DEFERRED;
+		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+			vport->fc_flag |= FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+			if (rscn_cnt) {
+				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+			}
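+			/* If the new payload fits in the last saved buffer,
+			 * append it there; otherwise store it as a new entry
+			 * in fc_rscn_id_list.
+			 */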
+			if ((rscn_cnt) &&
+			    (payload_len + length <= LPFC_BPL_SIZE)) {
+				*cmd &= ELS_CMD_MASK;
+				*cmd |= cpu_to_be32(payload_len + length);
+				memcpy(((uint8_t *)cmd) + length, lp,
+				       payload_len);
+			} else {
+				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+				vport->fc_rscn_id_cnt++;
+				/* If we zero, cmdiocb->context2, the calling
+				 * routine will not try to free it.
+				 */
+				cmdiocb->context2 = NULL;
+			}
+			/* Deferred RSCN */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0235 Deferred RSCN "
+					 "Data: x%x x%x x%x\n",
+					 vport->fc_rscn_id_cnt, vport->fc_flag,
+					 vport->port_state);
+		} else {
+			vport->fc_flag |= FC_RSCN_DISCOVERY;
+			spin_unlock_irq(shost->host_lock);
+			/* ReDiscovery RSCN */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+					 "0234 ReDiscovery RSCN "
+					 "Data: x%x x%x x%x\n",
+					 vport->fc_rscn_id_cnt, vport->fc_flag,
+					 vport->port_state);
+		}
+		/* Indicate we are done walking fc_rscn_id_list on this vport */
+		vport->fc_rscn_flush = 0;
+		/* Send back ACC */
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+		/* send RECOVERY event for ALL nodes that match RSCN payload */
+		lpfc_rscn_recovery_check(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_RSCN_DEFERRED;
+		spin_unlock_irq(shost->host_lock);
+		return 0;
+	}
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
+		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_RSCN_MODE;
+	spin_unlock_irq(shost->host_lock);
+	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
+	/* Indicate we are done walking fc_rscn_id_list on this vport */
+	vport->fc_rscn_flush = 0;
+	/*
+	 * If we zero, cmdiocb->context2, the calling routine will
+	 * not try to free it.
+	 */
+	cmdiocb->context2 = NULL;
+	lpfc_set_disctmo(vport);
+	/* Send back ACC */
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	/* send RECOVERY event for ALL nodes that match RSCN payload */
+	lpfc_rscn_recovery_check(vport);
+	return lpfc_els_handle_rscn(vport);
+}
+
+/**
+ * lpfc_els_handle_rscn - Handle rscn for a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine handles the Registration State Change Notification
+ * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
+ * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
+ * if the ndlp to NameServer exists, a Common Transport (CT) command to the
+ * NameServer shall be issued. If CT command to the NameServer fails to be
+ * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
+ * RSCN activities with the @vport.
+ *
+ * Return code
+ *   0 - Cleaned up rscn on the @vport
+ *   1 - Wait for plogi to name server before proceed
+ **/
+int
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_hba *phba = vport->phba;
+
+	/* Ignore RSCN if the port is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		lpfc_els_flush_rscn(vport);
+		return 0;
+	}
+
+	/* Start timer for RSCN processing */
+	lpfc_set_disctmo(vport);
+
+	/* RSCN processed */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+			 vport->port_state);
+
+	/* To process RSCN, first compare RSCN data with NameServer */
+	vport->fc_ns_retry = 0;
+	vport->num_disc_nodes = 0;
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		/* Good ndlp, issue CT Request to NameServer */
+		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
+			/* Wait for NameServer query cmpl before we can
+			   continue */
+			return 1;
+	} else {
+		/* If login to NameServer does not exist, issue one */
+		/* Good status, issue PLOGI to NameServer */
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+			/* Wait for NameServer login cmpl before we can
+			   continue */
+			return 1;
+
+		if (ndlp) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_PLOGI_ISSUE);
+			if (!ndlp) {
+				lpfc_els_flush_rscn(vport);
+				return 0;
+			}
+			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+		} else {
+			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+			if (!ndlp) {
+				lpfc_els_flush_rscn(vport);
+				return 0;
+			}
+			lpfc_nlp_init(vport, ndlp, NameServer_DID);
+			ndlp->nlp_prev_state = ndlp->nlp_state;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+		}
+		ndlp->nlp_type |= NLP_FABRIC;
+		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
+		/* Wait for NameServer login cmpl before we can
+		 * continue
+		 */
+		return 1;
+	}
+
+	lpfc_els_flush_rscn(vport);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
+ * unsolicited event. An unsolicited FLOGI can be received in a point-to-
+ * point topology. As an unsolicited FLOGI should not be received in a loop
+ * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
+ * lpfc_check_sparm() routine is invoked to check the parameters in the
+ * unsolicited FLOGI. If parameters validation failed, the routine
+ * lpfc_els_rsp_reject() shall be called with reject reason code set to
+ * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
+ * FLOGI shall be compared with the Port WWN of the @vport to determine who
+ * will initiate PLOGI. The party with the lexicographically higher value
+ * shall have higher priority (as the winning port), initiate PLOGI, and
+ * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
+ * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
+ * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
+ *
+ * Return code
+ *   0 - Successfully processed the unsolicited flogi
+ *   1 - Failed to process the unsolicited flogi
+ **/
+static int
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *lp = (uint32_t *) pcmd->virt;
+	IOCB_t *icmd = &cmdiocb->iocb;
+	struct serv_parm *sp;
+	LPFC_MBOXQ_t *mbox;
+	struct ls_rjt stat;
+	uint32_t cmd, did;
+	int rc;
+
+	cmd = *lp++;
+	sp = (struct serv_parm *) lp;
+
+	/* FLOGI received */
+
+	lpfc_set_disctmo(vport);
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		/* We should never receive a FLOGI in loop mode, ignore it */
+		did = icmd->un.elsreq64.remoteID;
+
+		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
+		   Loop Mode */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0113 An FLOGI ELS command x%x was "
+				 "received from DID x%x in Loop Mode\n",
+				 cmd, did);
+		return 1;
+	}
+
+	did = Fabric_DID;
+
+	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
+		/* For a FLOGI we accept; then, if our portname is greater
+		 * than the remote portname, we initiate Nport login.
+		 */
+
+		rc = memcmp(&vport->fc_portname, &sp->portName,
+			    sizeof(struct lpfc_name));
+
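+		/* rc == 0 means the FLOGI carries our own Port Name, i.e. it
+		 * was looped back to us.
+		 */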
+		if (!rc) {
+			if (phba->sli_rev < LPFC_SLI_REV4) {
+				mbox = mempool_alloc(phba->mbox_mem_pool,
+						     GFP_KERNEL);
+				if (!mbox)
+					return 1;
+				lpfc_linkdown(phba);
+				lpfc_init_link(phba, mbox,
+					       phba->cfg_topology,
+					       phba->cfg_link_speed);
+				mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+				mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+				mbox->vport = vport;
+				rc = lpfc_sli_issue_mbox(phba, mbox,
+							 MBX_NOWAIT);
+				lpfc_set_loopback_flag(phba);
+				if (rc == MBX_NOT_FINISHED)
+					mempool_free(mbox, phba->mbox_mem_pool);
+				return 1;
+			} else {
+				/* abort the flogi coming back to ourselves
+				 * due to external loopback on the port.
+				 */
+				lpfc_els_abort_flogi(phba);
+				return 0;
+			}
+		} else if (rc > 0) {	/* greater than */
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_PT2PT_PLOGI;
+			spin_unlock_irq(shost->host_lock);
+		}
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PT2PT;
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+	} else {
+		/* Reject this request because of invalid parameters */
+		stat.un.b.lsRjtRsvd0 = 0;
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+		stat.un.b.vendorUnique = 0;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 1;
+	}
+
+	/* Send back ACC */
+	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Request Node Identification Data (RNID) IOCB
+ * received as an ELS unsolicited event. Only when the RNID specifies format
+ * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
+ * this routine invoke the lpfc_els_rsp_rnid_acc() routine to Accept (ACC)
+ * the RNID ELS command. All other RNID formats are rejected by invoking the
+ * lpfc_els_rsp_reject() routine.
+ *
+ * Return code
+ *   0 - Successfully processed rnid iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	RNID *rn;
+	struct ls_rjt stat;
+	uint32_t cmd, did;
+
+	icmd = &cmdiocb->iocb;
+	did = icmd->un.elsreq64.remoteID;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	rn = (RNID *) lp;
+
+	/* RNID received */
+
+	switch (rn->Format) {
+	case 0:
+	case RNID_TOPOLOGY_DISC:
+		/* Send back ACC */
+		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
+		break;
+	default:
+		/* Reject this request because format not supported */
+		stat.un.b.lsRjtRsvd0 = 0;
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+		stat.un.b.vendorUnique = 0;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	uint8_t *pcmd;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+	/* skip over first word of echo command to find echo data */
+	pcmd += sizeof(uint32_t);
+
+	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Link Incident Report Registration (LIRR) IOCB
+ * received as an ELS unsolicited event. Currently, this function just invokes
+ * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
+ *
+ * Return code
+ *   0 - Successfully processed lirr iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct ls_rjt stat;
+
+	/* For now, unconditionally reject this command */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
+ * received as an ELS unsolicited event. A request to RRQ shall only
+ * be accepted if the Originator Nx_Port N_Port_ID or the Responder
+ * Nx_Port N_Port_ID of the target Exchange is the same as the
+ * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
+ * not accepted, an LS_RJT with reason code "Unable to perform
+ * command request" and reason code explanation "Invalid Originator
+ * S_ID" shall be returned. For now, we just unconditionally accept
+ * RRQ from the target.
+ **/
+static void
+lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
+}
+
+/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Link Error Status (RLS) unsolicited IOCB event.
+ * It collects the link statistics from the completion of the
+ * MBX_READ_LNK_STAT mailbox command, constructs the RLS response with the
+ * link statistics collected, and then invokes the lpfc_sli_issue_iocb()
+ * routine to send the ACC response to the RLS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	struct RLS_RSP *rls_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t oxid;
+	uint16_t rxid;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
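+	/* context1 was set up by lpfc_els_rcv_rls() with the ox_id in the
+	 * upper 16 bits and the rx_id in the lower 16 bits.
+	 */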
+	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb)
+		return;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rxid;
+	icmd->unsli3.rcvsli3.ox_id = oxid;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rls_rsp = (struct RLS_RSP *)pcmd;
+
+	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
+ * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	RPS_RSP *rps_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t status;
+	uint16_t oxid;
+	uint16_t rxid;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb)
+		return;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = rxid;
+	icmd->unsli3.rcvsli3.ox_id = oxid;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rps_rsp = (RPS_RSP *)pcmd;
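+	/* Build the RPS port status word from the current topology and
+	 * fabric attachment state.
+	 */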
+
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+		status = 0x10;
+	else
+		status = 0x8;
+	if (phba->pport->fc_flag & FC_FABRIC)
+		status |= 0x4;
+
+	rps_rsp->rsvd1 = 0;
+	rps_rsp->portStatus = cpu_to_be16(status);
+	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return;
+}
+
+/**
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Error Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command to
+ * read the HBA link statistics; the callback function set on that mailbox
+ * command, lpfc_els_rsp_rls_acc(), actually sends out the RLS Accept (ACC)
+ * response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *pcmd;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RLS request and be done with it */
+		goto reject_out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+	if (mbox) {
+		lpfc_read_lnk_stat(phba, mbox);
+		mbox->context1 = (void *)((unsigned long)
+			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+			cmdiocb->iocb.ulpContext)); /* rx_id */
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			!= MBX_NOT_FINISHED)
+			/* Mbox completion will send ELS Response */
+			return 0;
+		/* Decrement reference count used for the failed mbox
+		 * command.
+		 */
+		lpfc_nlp_put(ndlp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
+ * Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct ls_rjt stat;
+	struct RTV_RSP *rtv_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint32_t cmdsize;
+
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RTV request and be done with it */
+		goto reject_out;
+
+	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+
+	/* use the command's xri in the response */
+	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
+	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+
+	rtv_rsp = (struct RTV_RSP *)pcmd;
+
+	/* populate RTV payload */
+	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
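+	/* qtov was assembled with bf_set() in CPU order; convert the whole
+	 * word to wire (big-endian) order.
+	 */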
+	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+			 "Data: x%x x%x x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi,
+			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return 0;
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port Status (RPS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command to
+ * read the HBA link statistics; the callback function set on that mailbox
+ * command, lpfc_els_rsp_rps_acc(), actually sends out the RPS Accept (ACC)
+ * response.
+ *
+ * Return codes
+ *   0 - Successfully processed rps iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t *lp;
+	uint8_t flag;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *pcmd;
+	RPS *rps;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RPS request and be done with it */
+		goto reject_out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	flag = (be32_to_cpu(*lp++) & 0xf);
+	rps = (RPS *) lp;
+
+	if ((flag == 0) ||
+	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
+	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+				    sizeof(struct lpfc_name)) == 0))) {
+
+		printk("Fix me....\n");
+		dump_stack();
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+		if (mbox) {
+			lpfc_read_lnk_stat(phba, mbox);
+			mbox->context1 = (void *)((unsigned long)
+				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+				cmdiocb->iocb.ulpContext)); /* rx_id */
+			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->vport = vport;
+			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
+			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+				!= MBX_NOT_FINISHED)
+				/* Mbox completion will send ELS Response */
+				return 0;
+			/* Decrement reference count used for the failed mbox
+			 * command.
+			 */
+			lpfc_nlp_put(ndlp);
+			mempool_free(mbox, phba->mbox_mem_pool);
+		}
+	}
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote N_Port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build an ELS RRQ command and send it to the target. If the iocb is
+ * issued successfully, the completion handler will clear the RRQ.
+ *
+ * Return codes
+ *   0 - Successfully sent rrq els iocb.
+ *   1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			uint32_t did, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct RRQ *els_rrq;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+
+	if (ndlp != rrq->ndlp)
+		ndlp = rrq->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return 1;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+				     ELS_CMD_RRQ);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For RRQ request, remainder of payload is Exchange IDs */
+	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+	pcmd += sizeof(uint32_t);
+	els_rrq = (struct RRQ *) pcmd;
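+	/* The RRQ payload identifies the exchange by its OX_ID/RX_ID pair;
+	 * the DID field carries our own N_Port ID.
+	 */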
+
+	bf_set(rrq_oxid, els_rrq, rrq->xritag);
+	bf_set(rrq_rxid, els_rrq, rrq->rxid);
+	bf_set(rrq_did, els_rrq, vport->fc_myDID);
+	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue RRQ:     did:x%x",
+		did, rrq->xritag, rrq->rxid);
+	elsiocb->context_un.rrq = rrq;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call the lpfc_issue_els_rrq if the rrq is
+ * still active for the xri. If this function returns a failure then
+ * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
+ *
+ * Returns 0 Success.
+ *         1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+							rrq->nlp_DID);
+	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+		return lpfc_issue_els_rrq(rrq->vport, ndlp,
+					 rrq->nlp_DID, rrq);
+	else
+		return 1;
+}
+
+/**
+ * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdsize: size of the ELS command.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
+ * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPL Accept Response ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued ACC RPL ELS command
+ *   1 - Failed to issue ACC RPL ELS command
+ **/
+static int
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	IOCB_t *icmd, *oldcmd;
+	RPL_RSP rpl_rsp;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	oldcmd = &oldiocb->iocb;
+	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
+	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
+	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint16_t);
+	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
+	pcmd += sizeof(uint16_t);
+
+	/* Setup the RPL ACC payload */
+	rpl_rsp.listLen = be32_to_cpu(1);
+	rpl_rsp.index = 0;
+	rpl_rsp.port_num_blk.portNum = 0;
+	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
+	    sizeof(struct lpfc_name));
+	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
+	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0120 Xmit ELS RPL ACC response tag x%x "
+			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			 "rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Read Port List (RPL) IOCB received as an ELS
+ * unsolicited event. It first checks the remote port state. If the remote
+ * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
+ * invokes the lpfc_els_rsp_reject() routine to send reject response.
+ * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
+ * to accept the RPL.
+ *
+ * Return code
+ *   0 - Successfully processed rpl iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	uint32_t maxsize;
+	uint16_t cmdsize;
+	RPL *rpl;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+		/* issue rejection response */
+		stat.un.b.lsRjtRsvd0 = 0;
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+		stat.un.b.vendorUnique = 0;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		/* rejected the unsolicited RPL request and done with it */
+		return 0;
+	}
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	rpl = (RPL *) (lp + 1);
+	maxsize = be32_to_cpu(rpl->maxsize);
+
+	/* We support only one port */
+	if ((rpl->index == 0) &&
+	    ((maxsize == 0) ||
+	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
+		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
+	} else {
+		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
+	}
+	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_farp - Process an unsolicited farp request els command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
+ * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
+ * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
+ * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
+ * remote PortName is compared against the FC PortName stored in the @vport
+ * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
+ * compared against the FC NodeName stored in the @vport data structure.
+ * If either of these matches and the FARP_REQUEST_FARPR flag is set in the
+ * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
+ * invoked to send a FARP Response to the remote node. Before sending the
+ * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
+ * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
+ * routine is invoked to log into the remote port first.
+ *
+ * Return code
+ *   0 - Either the FARP Match Mode not supported or successfully processed
+ **/
+static int
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	FARP *fp;
+	uint32_t cmd, cnt, did;
+
+	icmd = &cmdiocb->iocb;
+	did = icmd->un.elsreq64.remoteID;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	fp = (FARP *) lp;
+	/* FARP-REQ received from DID <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0601 FARP-REQ received from DID x%x\n", did);
+	/* We will only support match on WWPN or WWNN */
+	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
+		return 0;
+	}
+
+	cnt = 0;
+	/* If this FARP command is searching for my portname */
+	if (fp->Mflags & FARP_MATCH_PORT) {
+		if (memcmp(&fp->RportName, &vport->fc_portname,
+			   sizeof(struct lpfc_name)) == 0)
+			cnt = 1;
+	}
+
+	/* If this FARP command is searching for my nodename */
+	if (fp->Mflags & FARP_MATCH_NODE) {
+		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+			   sizeof(struct lpfc_name)) == 0)
+			cnt = 1;
+	}
+
+	if (cnt) {
+		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+			/* Log back into the node before sending the FARP. */
+			if (fp->Rflags & FARP_REQUEST_PLOGI) {
+				ndlp->nlp_prev_state = ndlp->nlp_state;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PLOGI_ISSUE);
+				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+			}
+
+			/* Send a FARP response to that node */
+			if (fp->Rflags & FARP_REQUEST_FARPR)
+				lpfc_issue_els_farpr(vport, did, 0);
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes Fibre Channel Address Resolution Protocol
+ * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
+ * invokes the lpfc_els_rsp_acc() routine to send an Accept (ACC) to the
+ * remote node for the FARP response request.
+ *
+ * Return code
+ *   0 - Successfully processed FARPR IOCB (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist  *ndlp)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	uint32_t cmd, did;
+
+	icmd = &cmdiocb->iocb;
+	did = icmd->un.elsreq64.remoteID;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	/* FARP-RSP received from DID <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0600 FARP-RSP received from DID x%x\n", did);
+	/* ACCEPT the Farp resp request */
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @fan_ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Address Notification (FAN) IOCB
+ * command received as an ELS unsolicited event. The FAN ELS command will
+ * only be processed on a physical port (i.e., the @vport represents the
+ * physical port). The fabric NodeName and PortName from the FAN IOCB are
+ * compared against those in the phba data structure. If either of them is
+ * different, the port has switched fabrics and the lpfc_issue_init_vfi()
+ * routine is invoked to restart Fabric Login (FLOGI) and start discovery
+ * over. Otherwise,
+ * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ *   0 - Successfully processed fan iocb (currently always returns 0).
+ **/
+static int
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *fan_ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t *lp;
+	FAN *fp;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+	fp = (FAN *) ++lp;
+	/* FAN received; Fan does not have a reply sequence */
+	if ((vport == phba->pport) &&
+	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
+		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
+			    sizeof(struct lpfc_name))) ||
+		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
+			    sizeof(struct lpfc_name)))) {
+			/* This port has switched fabrics. FLOGI is required */
+			lpfc_issue_init_vfi(vport);
+		} else {
+			/* FAN verified - skip FLOGI */
+			vport->fc_myDID = vport->fc_prevDID;
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else {
+				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+					"3138 Need register VFI: (x%x/%x)\n",
+					vport->fc_prevDID, vport->fc_myDID);
+				lpfc_issue_reg_vfi(vport);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_els_timeout - Handler function for the ELS timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the ELS timer after timeout. It posts the ELS
+ * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
+ * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
+ * up the worker thread. It is for the worker thread to invoke the routine
+ * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
+ **/
+void
+lpfc_els_timeout(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_ELS_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+
+/**
+ * lpfc_els_timeout_handler - Process an els timeout event
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine is the actual handler function that processes an ELS timeout
+ * event. It walks the ELS ring to get and abort all the IOCBs (except the
+ * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
+ * invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	IOCB_t *cmd = NULL;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t els_command = 0;
+	uint32_t timeout;
+	uint32_t remote_ID = 0xffffffff;
+	LIST_HEAD(txcmplq_completions);
+	LIST_HEAD(abort_list);
+
+
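+	/* The ELS timeout interval is twice the resource allocation
+	 * timeout value (R_A_TOV).
+	 */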
+	timeout = (uint32_t)(phba->fc_ratov << 1);
+
+	pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&pring->txcmplq, &txcmplq_completions);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
+		cmd = &piocb->iocb;
+
+		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+
+		pcmd = (struct lpfc_dmabuf *) piocb->context2;
+		if (pcmd)
+			els_command = *(uint32_t *) (pcmd->virt);
+
+		if (els_command == ELS_CMD_FARP ||
+		    els_command == ELS_CMD_FARPR ||
+		    els_command == ELS_CMD_FDISC)
+			continue;
+
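+		/* Not expired yet: decrement the remaining driver timeout
+		 * and skip this iocb for now.
+		 */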
+		if (piocb->drvrTimeout > 0) {
+			if (piocb->drvrTimeout >= timeout)
+				piocb->drvrTimeout -= timeout;
+			else
+				piocb->drvrTimeout = 0;
+			continue;
+		}
+
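+		/* Recover the remote DID for the timeout log: ELS requests
+		 * carry it in the iocb, while GEN_REQUEST iocbs require an
+		 * RPI lookup.
+		 */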
+		remote_ID = 0xffffffff;
+		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
+			remote_ID = cmd->un.elsreq64.remoteID;
+		else {
+			struct lpfc_nodelist *ndlp;
+			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+				remote_ID = ndlp->nlp_DID;
+		}
+		list_add_tail(&piocb->dlist, &abort_list);
+	}
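+	/* Restore the txcmplq, then abort each iocb collected on abort_list */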
+	spin_lock_irq(&phba->hbalock);
+	list_splice(&txcmplq_completions, &pring->txcmplq);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			 "0127 ELS timeout Data: x%x x%x x%x "
+			 "x%x\n", els_command,
+			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&piocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+}
+
+/**
+ * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with a non-NULL completion callback function, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
+ * callback function, the IOCB will simply be released. Finally, it walks
+ * the ELS transmit completion queue to issue an abort IOCB to any transmit
+ * completion queue IOCB that is associated with the @vport and is not
+ * an IOCB from libdfc (i.e., the management plane IOCBs that are not
+ * part of the discovery state machine) out to HBA by invoking the
+ * lpfc_sli_issue_abort_iotag() routine. Note that although this function
+ * issues an abort IOCB for each queued transmit completion IOCB, it does not
+ * guarantee that the IOCBs are actually aborted by the time it returns.
+ **/
+void
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	IOCB_t *cmd = NULL;
+
+	lpfc_fabric_abort_vport(vport);
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+		cmd = &piocb->iocb;
+
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+			continue;
+		}
+
+		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
+		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
+		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
+			continue;
+
+		if (piocb->vport != vport)
+			continue;
+
+		list_move_tail(&piocb->list, &completions);
+		pring->txq_cnt--;
+	}
+
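+	/* Next, request aborts for this vport's ELS iocbs already in flight */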
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+			continue;
+		}
+
+		if (piocb->vport != vport)
+			continue;
+
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+
+	return;
+}
+
+/**
+ * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * the IOCBs with the completion callback function associated, the callback
+ * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
+ * callback function associated, the IOCB will simply be released. Finally,
+ * it walks the ELS transmit completion queue to issue an abort IOCB to any
+ * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
+ * management plane IOCBs that are not part of the discovery state machine)
+ * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
+ **/
+void
+lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	IOCB_t *cmd = NULL;
+
+	lpfc_fabric_abort_hba(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
+		cmd = &piocb->iocb;
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
+		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
+		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
+		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
+			continue;
+		list_move_tail(&piocb->list, &completions);
+		pring->txq_cnt--;
+	}
+	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+			continue;
+		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+
+	return;
+}
+
+/**
+ * lpfc_send_els_failure_event - Posts an ELS command failure event
+ * @phba: Pointer to hba context object.
+ * @cmdiocbp: Pointer to command iocb which reported error.
+ * @rspiocbp: Pointer to response iocb which reported error.
+ *
+ * This function sends an event when there is an ELS command
+ * failure.
+ **/
+void
+lpfc_send_els_failure_event(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbp,
+			struct lpfc_iocbq *rspiocbp)
+{
+	struct lpfc_vport *vport = cmdiocbp->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_lsrjt_event lsrjt_event;
+	struct lpfc_fabric_event_header fabric_event;
+	struct ls_rjt stat;
+	struct lpfc_nodelist *ndlp;
+	uint32_t *pcmd;
+
+	ndlp = cmdiocbp->context1;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
+		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
+		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
+			sizeof(struct lpfc_name));
+		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
+			sizeof(struct lpfc_name));
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+			cmdiocbp->context2)->virt);
+		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
+		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
+		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(lsrjt_event),
+			(char *)&lsrjt_event,
+			LPFC_NL_VENDOR_ID);
+		return;
+	}
+	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+		fabric_event.event_type = FC_REG_FABRIC_EVENT;
+		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
+		else
+			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
+		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
+			sizeof(struct lpfc_name));
+		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
+			sizeof(struct lpfc_name));
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(fabric_event),
+			(char *)&fabric_event,
+			LPFC_NL_VENDOR_ID);
+		return;
+	}
+
+}
+
+/**
+ * lpfc_send_els_event - Posts unsolicited els event
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node object.
+ * @payload: Pointer to the received ELS command payload.
+ *
+ * This function posts an event when there is an incoming
+ * unsolicited ELS command.
+ **/
+static void
+lpfc_send_els_event(struct lpfc_vport *vport,
+		    struct lpfc_nodelist *ndlp,
+		    uint32_t *payload)
+{
+	struct lpfc_els_event_header *els_data = NULL;
+	struct lpfc_logo_event *logo_data = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (*payload == ELS_CMD_LOGO) {
+		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
+		if (!logo_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0148 Failed to allocate memory "
+				"for LOGO event\n");
+			return;
+		}
+		els_data = &logo_data->header;
+	} else {
+		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
+			GFP_KERNEL);
+		if (!els_data) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				"0149 Failed to allocate memory "
+				"for ELS event\n");
+			return;
+		}
+	}
+	els_data->event_type = FC_REG_ELS_EVENT;
+	switch (*payload) {
+	case ELS_CMD_PLOGI:
+		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
+		break;
+	case ELS_CMD_PRLO:
+		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
+		break;
+	case ELS_CMD_ADISC:
+		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
+		break;
+	case ELS_CMD_LOGO:
+		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
+		/* Copy the WWPN in the LOGO payload */
+		memcpy(logo_data->logo_wwpn, &payload[2],
+			sizeof(struct lpfc_name));
+		break;
+	default:
+		kfree(els_data);
+		return;
+	}
+	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	if (*payload == ELS_CMD_LOGO) {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_logo_event),
+			(char *)logo_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(logo_data);
+	} else {
+		fc_host_post_vendor_event(shost,
+			fc_get_event_number(),
+			sizeof(struct lpfc_els_event_header),
+			(char *)els_data,
+			LPFC_NL_VENDOR_ID);
+		kfree(els_data);
+	}
+
+	return;
+}
+
+
+/**
+ * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine is used for processing the IOCB associated with an unsolicited
+ * event. It first determines whether there is an existing ndlp that matches
+ * the DID from the unsolicited IOCB. If not, it will create a new one with
+ * the DID from the unsolicited IOCB. The ELS command from the unsolicited
+ * IOCB is then used to invoke the proper routine and to set up proper state
+ * of the discovery state machine.
+ **/
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_nodelist *ndlp;
+	struct ls_rjt stat;
+	uint32_t *payload;
+	uint32_t cmd, did, newnode, rjt_err = 0;
+	IOCB_t *icmd = &elsiocb->iocb;
+
+	if (!vport || !(elsiocb->context2))
+		goto dropit;
+
+	newnode = 0;
+	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+	cmd = *payload;
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+		lpfc_post_buffer(phba, pring, 1);
+
+	did = icmd->un.rcvels.remoteID;
+	if (icmd->ulpStatus) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
+			icmd->ulpStatus, icmd->un.ulpWord[4], did);
+		goto dropit;
+	}
+
+	/* Check to see if link went down during discovery */
+	if (lpfc_els_chk_latt(vport))
+		goto dropit;
+
+	/* Ignore traffic received during vport shutdown. */
+	if (vport->load_flag & FC_UNLOADING)
+		goto dropit;
+
+	/* If NPort discovery is delayed drop incoming ELS */
+	if ((vport->fc_flag & FC_DISC_DELAYED) &&
+			(cmd != ELS_CMD_PLOGI))
+		goto dropit;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			goto dropit;
+
+		lpfc_nlp_init(vport, ndlp, did);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+			ndlp->nlp_type |= NLP_FABRIC;
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp,
+					NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
+			ndlp->nlp_type |= NLP_FABRIC;
+	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+		/* This is similar to the new node path */
+		ndlp = lpfc_nlp_get(ndlp);
+		if (!ndlp)
+			goto dropit;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		newnode = 1;
+	}
+
+	phba->fc_stat.elsRcvFrame++;
+
+	elsiocb->context1 = lpfc_nlp_get(ndlp);
+	elsiocb->vport = vport;
+
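+	/* An RSCN carries extra length information in its first word; mask it
+	 * down so the value matches ELS_CMD_RSCN in the switch below.
+	 */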
+	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
+		cmd &= ELS_CMD_MASK;
+	}
+	/* ELS command <elsCmd> received from NPORT <did> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0112 ELS command x%x received from NPORT x%x "
+			 "Data: x%x\n", cmd, did, vport->port_state);
+	switch (cmd) {
+	case ELS_CMD_PLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPLOGI++;
+		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+
+		lpfc_send_els_event(vport, ndlp, payload);
+
+		/* If Nport discovery is delayed, reject PLOGIs */
+		if (vport->fc_flag & FC_DISC_DELAYED) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			if (!(phba->pport->fc_flag & FC_PT2PT) ||
+				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+				rjt_err = LSRJT_UNABLE_TPC;
+				break;
+			}
+			/* We get here, and drop thru, if we are PT2PT with
+			 * another NPort and the other side has initiated
+			 * the PLOGI before responding to our FLOGI.
+			 */
+		}
+
+		shost = lpfc_shost_from_vport(vport);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PLOGI);
+
+		break;
+	case ELS_CMD_FLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFLOGI++;
+		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_LOGO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvLOGO++;
+		lpfc_send_els_event(vport, ndlp, payload);
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+		break;
+	case ELS_CMD_PRLO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPRLO++;
+		lpfc_send_els_event(vport, ndlp, payload);
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+		break;
+	case ELS_CMD_RSCN:
+		phba->fc_stat.elsRcvRSCN++;
+		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_ADISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		lpfc_send_els_event(vport, ndlp, payload);
+		phba->fc_stat.elsRcvADISC++;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_ADISC);
+		break;
+	case ELS_CMD_PDISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPDISC++;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PDISC);
+		break;
+	case ELS_CMD_FARPR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFARPR++;
+		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_FARP:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFARP++;
+		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_FAN:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvFAN++;
+		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
+		break;
+	case ELS_CMD_PRLI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvPRLI++;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
+			break;
+		}
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+		break;
+	case ELS_CMD_LIRR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvLIRR++;
+		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RLS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRLS++;
+		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RPS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRPS++;
+		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RPL:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRPL++;
+		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RNID:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRNID++;
+		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RTV:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+		phba->fc_stat.elsRcvRTV++;
+		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_RRQ:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRRQ++;
+		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	case ELS_CMD_ECHO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvECHO++;
+		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	default:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
+			cmd, did, vport->port_state);
+
+		/* Unsupported ELS command, reject */
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
+
+		/* Unknown ELS command <elsCmd> received from NPORT <did> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0115 Unknown ELS command x%x "
+				 "received from NPORT x%x\n", cmd, did);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
+	}
+
+	/* check if need to LS_RJT received ELS cmd */
+	if (rjt_err) {
+		memset(&stat, 0, sizeof(stat));
+		stat.un.b.lsRjtRsnCode = rjt_err;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
+			NULL);
+	}
+
+	lpfc_nlp_put(elsiocb->context1);
+	elsiocb->context1 = NULL;
+	return;
+
+dropit:
+	if (vport && !(vport->load_flag & FC_UNLOADING))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0111 Dropping received ELS cmd "
+			"Data: x%x x%x x%x\n",
+			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+	phba->fc_stat.elsRcvDrop++;
+}
+
+/**
+ * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @elsiocb: pointer to lpfc els iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from
+ * the SLI ring on which the unsolicited event was received.
+ **/
+void
+lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		     struct lpfc_iocbq *elsiocb)
+{
+	struct lpfc_vport *vport = phba->pport;
+	IOCB_t *icmd = &elsiocb->iocb;
+	dma_addr_t paddr;
+	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
+	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+
+	elsiocb->context1 = NULL;
+	elsiocb->context2 = NULL;
+	elsiocb->context3 = NULL;
+
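+	/* Handle receive-buffer shortages reported by the firmware before
+	 * examining the payload.
+	 */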
+	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
+	    (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+		phba->fc_stat.NoRcvBuf++;
+		/* Not enough posted buffers; Try posting more buffers */
+		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+			lpfc_post_buffer(phba, pring, 0);
+		return;
+	}
+
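+	/* With NPIV enabled, deliver the frame to the vport owning the VPI it
+	 * arrived on; a VPI of 0xffff maps to the physical port.
+	 */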
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+			vport = phba->pport;
+		else
+			vport = lpfc_find_vport_by_vpid(phba,
+						icmd->unsli3.rcvsli3.vpi);
+	}
+
+	/* If there are no BDEs associated
+	 * with this IOCB, there is nothing to do.
+	 */
+	if (icmd->ulpBdeCount == 0)
+		return;
+
+	/* type of ELS cmd is first 32bit word
+	 * in packet
+	 */
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		elsiocb->context2 = bdeBuf1;
+	} else {
+		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
+				 icmd->un.cont64[0].addrLow);
+		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+							     paddr);
+	}
+
+	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+	/*
+	 * The different unsolicited event handlers would tell us
+	 * if they are done with "mp" by setting context2 to NULL.
+	 */
+	if (elsiocb->context2) {
+		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+		elsiocb->context2 = NULL;
+	}
+
+	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+	    icmd->ulpBdeCount == 2) {
+		elsiocb->context2 = bdeBuf2;
+		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+		/* free mp if we are done with it */
+		if (elsiocb->context2) {
+			lpfc_in_buf_free(phba, elsiocb->context2);
+			elsiocb->context2 = NULL;
+		}
+	}
+}
+
+/**
+ * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine issues a Port Login (PLOGI) to the Name Server with
+ * State Change Request (SCR) for a @vport. This routine will create an
+ * ndlp for the Name Server associated to the @vport if such node does
+ * not already exist. The PLOGI to Name Server is issued by invoking the
+ * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
+ * (FDMI) is configured for the @vport, an FDMI node will be created and
+ * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
+ **/
+void
+lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * If the lpfc_delay_discovery parameter is set, the clean address
+	 * bit is cleared, and the FC fabric parameters have changed, delay FC NPort
+	 * discovery.
+	 */
+	spin_lock_irq(shost->host_lock);
+	if (vport->fc_flag & FC_DISC_DELAYED) {
+		spin_unlock_irq(shost->host_lock);
+		mod_timer(&vport->delayed_disc_tmo,
+			jiffies + HZ * phba->fc_ratov);
+		return;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
+	if (!ndlp) {
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_disc_start(vport);
+				return;
+			}
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0251 NameServer login: no memory\n");
+			return;
+		}
+		lpfc_nlp_init(vport, ndlp, NameServer_DID);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_disc_start(vport);
+				return;
+			}
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					"0348 NameServer login: node freed\n");
+			return;
+		}
+	}
+	ndlp->nlp_type |= NLP_FABRIC;
+
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0252 Cannot issue NameServer login\n");
+		return;
+	}
+
+	if (vport->cfg_fdmi_on) {
+		/* If this is the first time, allocate an ndlp and initialize
+		 * it. Otherwise, make sure the node is enabled and then do the
+		 * login.
+		 */
+		ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
+		if (!ndlp_fdmi) {
+			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+						  GFP_KERNEL);
+			if (ndlp_fdmi) {
+				lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+				ndlp_fdmi->nlp_type |= NLP_FABRIC;
+			} else
+				return;
+		}
+		if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
+			ndlp_fdmi = lpfc_enable_node(vport,
+						     ndlp_fdmi,
+						     NLP_STE_NPR_NODE);
+
+		if (ndlp_fdmi) {
+			lpfc_nlp_set_state(vport, ndlp_fdmi,
+					   NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
+		}
+	}
+}
+
+/**
+ * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function to register new vport
+ * mailbox command. If the new vport mailbox command completes successfully,
+ * the fabric registration login shall be performed on physical port (the
+ * new vport created is actually a physical port, with VPI 0) or the port
+ * login to Name Server for State Change Request (SCR) will be performed
+ * on virtual port (real virtual port, with VPI greater than 0).
+ **/
+static void
+lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	MAILBOX_t *mb = &pmb->u.mb;
+	int rc;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				"0915 Register VPI failed : Status: x%x"
+				" upd bit: x%x \n", mb->mbxStatus,
+				 mb->un.varRegVpi.upd);
+		if (phba->sli_rev == LPFC_SLI_REV4 &&
+			mb->un.varRegVpi.upd)
+			goto mbox_err_exit;
+
+		switch (mb->mbxStatus) {
+		case 0x11:	/* unsupported feature */
+		case 0x9603:	/* max_vpi exceeded */
+		case 0x9602:	/* Link event since CLEAR_LA */
+			/* giving up on vport registration */
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+			spin_unlock_irq(shost->host_lock);
+			lpfc_can_disctmo(vport);
+			break;
+		/* If reg_vpi fail with invalid VPI status, re-init VPI */
+		case 0x20:
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_init_vpi(phba, pmb, vport->vpi);
+			pmb->vport = vport;
+			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
+			rc = lpfc_sli_issue_mbox(phba, pmb,
+				MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED) {
+				lpfc_printf_vlog(vport,
+					KERN_ERR, LOG_MBOX,
+					"2732 Failed to issue INIT_VPI"
+					" mailbox command\n");
+			} else {
+				lpfc_nlp_put(ndlp);
+				return;
+			}
+
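+			/* INIT_VPI could not be issued; fall through to the
+			 * default recovery path.
+			 */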
+		default:
+			/* Try to recover from this error */
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vport);
+			lpfc_mbx_unreg_vpi(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			spin_unlock_irq(shost->host_lock);
+			if (vport->port_type == LPFC_PHYSICAL_PORT
+				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+				lpfc_issue_init_vfi(vport);
+			else
+				lpfc_initial_fdisc(vport);
+			break;
+		}
+	} else {
+		spin_lock_irq(shost->host_lock);
+		vport->vpi_state |= LPFC_VPI_REGISTERED;
+		spin_unlock_irq(shost->host_lock);
+		if (vport == phba->pport) {
+			if (phba->sli_rev < LPFC_SLI_REV4)
+				lpfc_issue_fabric_reglogin(vport);
+			else {
+				/*
+				 * If the physical port is instantiated using
+				 * FDISC, do not start vport discovery.
+				 */
+				if (vport->port_state != LPFC_FDISC)
+					lpfc_start_fdiscs(phba);
+				lpfc_do_scr_ns_plogi(phba, vport);
+			}
+		} else
+			lpfc_do_scr_ns_plogi(phba, vport);
+	}
+mbox_err_exit:
+	/* Now, we decrement the ndlp reference count held for this
+	 * callback function
+	 */
+	lpfc_nlp_put(ndlp);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_register_new_vport - Register a new vport with a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine registers the @vport as a new virtual port with a HBA.
+ * It is done through a registering vpi mailbox command.
+ **/
+void
+lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	LPFC_MBOXQ_t *mbox;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_reg_vpi(vport, mbox);
+		mbox->vport = vport;
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+		    == MBX_NOT_FINISHED) {
+			/* mailbox command was not successful; decrement the
+			 * ndlp reference count taken for this command
+			 */
+			lpfc_nlp_put(ndlp);
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				"0253 Register VPI: Can't send mbox\n");
+			goto mbox_err_exit;
+		}
+	} else {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0254 Register VPI: no memory\n");
+		goto mbox_err_exit;
+	}
+	return;
+
+mbox_err_exit:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	return;
+}
+
+/**
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine cancels the retry delay timers to all the vports.
+ **/
+void
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	uint32_t link_state;
+	int i;
+
+	/* Treat this failure as linkdown for all vports */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	if (vports) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_els_flush_cmd(vports[i]);
+		}
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	/* Cancel the retry delay timers for all vports */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+	/* If the fabric requires FLOGI, re-instantiate the physical login */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (!ndlp)
+		return;
+
+	shost = lpfc_shost_from_vport(phba->pport);
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+	phba->pport->port_state = LPFC_FLOGI;
+	return;
+}
+
+/**
+ * lpfc_fabric_login_reqd - Check if FLOGI required.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to FDISC command iocb.
+ * @rspiocb: pointer to FDISC response iocb.
+ *
+ * This routine checks if a FLOGI is required for FDISC
+ * to succeed.
+ **/
+static int
+lpfc_fabric_login_reqd(struct lpfc_hba *phba,
+		struct lpfc_iocbq *cmdiocb,
+		struct lpfc_iocbq *rspiocb)
+{
+
+	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
+		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+		return 0;
+	else
+		return 1;
+}
+
+/**
+ * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to a Fabric Discover
+ * (FDISC) ELS command. Since all the FDISC ELS commands are issued
+ * single threaded, each FDISC completion callback function will reset
+ * the discovery timer for all vports so that the timers do not time out
+ * unnecessarily. The function checks the FDISC IOCB status. If an error is
+ * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise,
+ * the vport will be set to the FC_VPORT_ACTIVE state. It then checks whether
+ * the DID
+ * assigned to the vport has been changed with the completion of the FDISC
+ * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
+ * are unregistered from the HBA, and then the lpfc_register_new_vport()
+ * routine is invoked to register new vport with the HBA. Otherwise, the
+ * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
+ * Server for State Change Request (SCR).
+ **/
+static void
+lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_nodelist *np;
+	struct lpfc_nodelist *next_np;
+	IOCB_t *irsp = &rspiocb->iocb;
+	struct lpfc_iocbq *piocb;
+	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+	struct serv_parm *sp;
+	uint8_t fabric_param_changed;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+			 irsp->ulpStatus, irsp->un.ulpWord[4],
+			 vport->fc_prevDID);
+	/* Since all FDISCs are being single threaded, we
+	 * must reset the discovery timer for ALL vports
+	 * waiting to send FDISC when one completes.
+	 */
+	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
+		lpfc_set_disctmo(piocb->vport);
+	}
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+	if (irsp->ulpStatus) {
+
+		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
+			lpfc_retry_pport_discovery(phba);
+			goto out;
+		}
+
+		/* Check for retry */
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+			goto out;
+		/* FDISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0126 FDISC failed. (x%x/x%x)\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4]);
+		goto fdisc_failed;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
+	vport->fc_flag |= FC_FABRIC;
+	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+		vport->fc_flag |=  FC_PUBLIC_LOOP;
+	spin_unlock_irq(shost->host_lock);
+
+	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	sp = prsp->virt + sizeof(uint32_t);
+	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+	memcpy(&vport->fabric_portname, &sp->portName,
+		sizeof(struct lpfc_name));
+	memcpy(&vport->fabric_nodename, &sp->nodeName,
+		sizeof(struct lpfc_name));
+	if (fabric_param_changed &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+		/* If our NportID changed, we need to ensure all
+		 * remaining NPORTs get unreg_login'ed so we can
+		 * issue unreg_vpi.
+		 */
+		list_for_each_entry_safe(np, next_np,
+			&vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(np) ||
+			    (np->nlp_state != NLP_STE_NPR_NODE) ||
+			    !(np->nlp_flag & NLP_NPR_ADISC))
+				continue;
+			spin_lock_irq(shost->host_lock);
+			np->nlp_flag &= ~NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_unreg_rpi(vport, np);
+		}
+		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
+		lpfc_mbx_unreg_vpi(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		else
+			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
+		spin_unlock_irq(shost->host_lock);
+	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+		/*
+		 * Driver needs to re-reg VPI in order for f/w
+		 * to update the MAC address.
+		 */
+		lpfc_register_new_vport(phba, vport, ndlp);
+		goto out;
+	}
+
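+	/* Continue vport bring-up based on what is still outstanding: INIT_VPI,
+	 * REG_VPI, or go straight to the Name Server PLOGI for SCR.
+	 */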
+	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+		lpfc_issue_init_vpi(vport);
+	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+		lpfc_register_new_vport(phba, vport, ndlp);
+	else
+		lpfc_do_scr_ns_plogi(phba, vport);
+	goto out;
+fdisc_failed:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	/* Cancel discovery timer */
+	lpfc_can_disctmo(vport);
+	lpfc_nlp_put(ndlp);
+out:
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
+ * lpfc_issue_els_fdisc - Issue a fdisc iocb command
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
+ * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
+ * routine to issue the IOCB, which makes sure only one outstanding fabric
+ * IOCB will be sent off HBA at any given time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FDISC ELS command.
+ *
+ * Return code
+ *   0 - Successfully issued fdisc iocb command
+ *   1 - Failed to issue fdisc iocb command
+ **/
+static int
+lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		     uint8_t retry)
+{
+	struct lpfc_hba *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	struct serv_parm *sp;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int did = ndlp->nlp_DID;
+	int rc;
+
+	vport->port_state = LPFC_FDISC;
+	vport->fc_myDID = 0;
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+				     ELS_CMD_FDISC);
+	if (!elsiocb) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0255 Issue FDISC: no IOCB\n");
+		return 1;
+	}
+
+	icmd = &elsiocb->iocb;
+	icmd->un.elsreq64.myID = 0;
+	icmd->un.elsreq64.fl = 1;
+
+	/*
+	 * SLI3 ports require a different context type value than SLI4.
+	 * Catch SLI3 ports here and override the prep.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3) {
+		icmd->ulpCt_h = 1;
+		icmd->ulpCt_l = 0;
+	}
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
+	pcmd += sizeof(uint32_t); /* CSP Word 1 */
+	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
+	sp = (struct serv_parm *) pcmd;
+	/* Setup CSPs accordingly for Fabric */
+	sp->cmn.e_d_tov = 0;
+	sp->cmn.w2.r_a_tov = 0;
+	sp->cmn.virtual_fabric_support = 0;
+	sp->cls1.classValid = 0;
+	sp->cls2.seqDelivery = 1;
+	sp->cls3.seqDelivery = 1;
+
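+	/* Skip over the remaining CSP words and overlay the vport's own port
+	 * and node names onto the copied service parameters.
+	 */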
+	pcmd += sizeof(uint32_t); /* CSP Word 2 */
+	pcmd += sizeof(uint32_t); /* CSP Word 3 */
+	pcmd += sizeof(uint32_t); /* CSP Word 4 */
+	pcmd += sizeof(uint32_t); /* Port Name */
+	memcpy(pcmd, &vport->fc_portname, 8);
+	pcmd += sizeof(uint32_t); /* Node Name */
+	pcmd += sizeof(uint32_t); /* Node Name */
+	memcpy(pcmd, &vport->fc_nodename, 8);
+
+	lpfc_set_disctmo(vport);
+
+	phba->fc_stat.elsXmitFDISC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FDISC:     did:x%x",
+		did, 0, 0);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0256 Issue FDISC: Cannot send IOCB\n");
+		return 1;
+	}
+	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+	return 0;
+}
+
+/**
+ * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the issuing of a LOGO
+ * ELS command off a vport. It frees the command IOCB and then decrements
+ * the reference count held on the ndlp for this completion function,
+ * indicating that the reference to the ndlp is no longer needed. Note that
+ * the lpfc_els_free_iocb() routine decrements the ndlp reference held for
+ * this callback function, and an additional explicit ndlp reference decrement
+ * will trigger the actual release of the ndlp.
+ **/
+static void
+lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+
+	lpfc_els_free_iocb(phba, cmdiocb);
+	vport->unreg_vpi_cmpl = VPORT_ERROR;
+
+	/* Trigger the release of the ndlp after logo */
+	lpfc_nlp_put(ndlp);
+
+	/* NPIV LOGO completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2928 NPIV LOGO completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, vport->num_disc_nodes);
+
+	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_FABRIC;
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
+/**
+ * lpfc_issue_els_npiv_logo - Issue a logo off a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a LOGO ELS command to an @ndlp off a @vport.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return codes
+ *   0 - Successfully issued logo off the @vport
+ *   1 - Failed to issue logo off the @vport
+ **/
+int
+lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+
+	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
+				     ELS_CMD_LOGO);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+	pcmd += sizeof(uint32_t);
+
+	/* Fill in LOGO payload */
+	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue LOGO npiv  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_SND;
+	spin_unlock_irq(shost->host_lock);
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+	    IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_LOGO_SND;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_fabric_block_timeout - Handler function to the fabric block timer
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked by the fabric iocb block timer after
+ * timeout. It posts the fabric iocb block timeout event by setting the
+ * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
+ * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
+ * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
+ * posted event WORKER_FABRIC_BLOCK_TMO.
+ **/
+void
+lpfc_fabric_block_timeout(unsigned long ptr)
+{
+	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflags;
+	uint32_t tmo_posted;
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues one fabric iocb from the driver internal list to
+ * the HBA. It first checks whether it is ready to issue one fabric iocb to
+ * the HBA (i.e., there is no outstanding fabric iocb). If so, it removes
+ * one pending fabric iocb from the driver internal list and invokes the
+ * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
+ **/
+static void
+lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *iocb;
+	unsigned long iflags;
+	int ret;
+	IOCB_t *cmd;
+
+repeat:
+	iocb = NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Post any pending iocb to the SLI layer */
+	if (atomic_read(&phba->fabric_iocb_count) == 0) {
+		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
+				 list);
+		if (iocb)
+			/* Increment fabric iocb count to hold the position */
+			atomic_inc(&phba->fabric_iocb_count);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (iocb) {
+		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+		iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+			"Fabric sched1:   ste:x%x",
+			iocb->vport->port_state, 0, 0);
+
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+		if (ret == IOCB_ERROR) {
+			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+			iocb->fabric_iocb_cmpl = NULL;
+			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+			cmd = &iocb->iocb;
+			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+			iocb->iocb_cmpl(phba, iocb, iocb);
+
+			atomic_dec(&phba->fabric_iocb_count);
+			goto repeat;
+		}
+	}
+
+	return;
+}
+
+/**
+ * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine unblocks the issuing of fabric iocb commands. The function
+ * clears the fabric iocb block bit and then invokes the routine
+ * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
+ * from the driver internal fabric iocb list.
+ **/
+void
+lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
+{
+	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+	lpfc_resume_fabric_iocbs(phba);
+	return;
+}
+
+/**
+ * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine blocks the issuing of fabric iocbs for a specified amount of
+ * time (currently 100 ms). This is done by setting the fabric iocb block bit
+ * and setting up a timeout timer for 100 ms. While the block bit is set, no
+ * more fabric iocbs will be issued to the HBA.
+ **/
+static void
+lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
+{
+	int blocked;
+
+	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+	/* Start a timer to unblock fabric iocbs after 100ms */
+	if (!blocked)
+		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+
+	return;
+}
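+
+/*
+ * The fabric_block_timer armed above expires in lpfc_fabric_block_timeout().
+ * A minimal sketch of how such a timer would typically be initialized during
+ * adapter setup (the actual initialization lives elsewhere in the driver and
+ * may differ in detail):
+ *
+ *	init_timer(&phba->fabric_block_timer);
+ *	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ *	phba->fabric_block_timer.data = (unsigned long)phba;
+ */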
+
+/**
+ * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the callback function assigned to the fabric iocb's
+ * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * function first restores and invokes the original iocb's callback function
+ * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
+ * fabric bound iocb from the driver internal fabric iocb list onto the wire.
+ **/
+static void
+lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+	struct lpfc_iocbq *rspiocb)
+{
+	struct ls_rjt stat;
+
+	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
+		BUG();
+
+	switch (rspiocb->iocb.ulpStatus) {
+		case IOSTAT_NPORT_RJT:
+		case IOSTAT_FABRIC_RJT:
+			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+				lpfc_block_fabric_iocbs(phba);
+			}
+			break;
+
+		case IOSTAT_NPORT_BSY:
+		case IOSTAT_FABRIC_BSY:
+			lpfc_block_fabric_iocbs(phba);
+			break;
+
+		case IOSTAT_LS_RJT:
+			stat.un.lsRjtError =
+				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
+				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
+				lpfc_block_fabric_iocbs(phba);
+			break;
+	}
+
+	if (atomic_read(&phba->fabric_iocb_count) == 0)
+		BUG();
+
+	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
+	cmdiocb->fabric_iocb_cmpl = NULL;
+	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+
+	atomic_dec(&phba->fabric_iocb_count);
+	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
+		/* Post any pending iocbs to HBA */
+		lpfc_resume_fabric_iocbs(phba);
+	}
+}
+
+/**
+ * lpfc_issue_fabric_iocb - Issue a fabric iocb command
+ * @phba: pointer to lpfc hba data structure.
+ * @iocb: pointer to lpfc command iocb data structure.
+ *
+ * This routine is used as the top-level API for issuing a fabric iocb command
+ * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
+ * function makes sure that only one fabric bound iocb will be outstanding at
+ * any given time. As such, this function will first check to see whether there
+ * is already an outstanding fabric iocb on the wire. If so, it will put the
+ * newly issued iocb onto the driver internal fabric iocb list, waiting to be
+ * issued later. Otherwise, it will issue the iocb on the wire and update the
+ * fabric iocb count to indicate that there is one fabric iocb on the wire.
+ *
+ * Note that this implementation can send fabric IOCBs out of order. The
+ * problem is that the "ready" boolean does not include the condition that the
+ * internal fabric IOCB list is empty. As such, a fabric IOCB issued by this
+ * routine might jump ahead of the fabric IOCBs already on the internal list.
+ *
+ * Return code
+ *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
+ *   IOCB_ERROR - failed to issue fabric iocb
+ **/
+static int
+lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
+{
+	unsigned long iflags;
+	int ready;
+	int ret;
+
+	if (atomic_read(&phba->fabric_iocb_count) > 1)
+		BUG();
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
+		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+	if (ready)
+		/* Increment fabric iocb count to hold the position */
+		atomic_inc(&phba->fabric_iocb_count);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (ready) {
+		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+		iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+			"Fabric sched2:   ste:x%x",
+			iocb->vport->port_state, 0, 0);
+
+		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
+
+		if (ret == IOCB_ERROR) {
+			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+			iocb->fabric_iocb_cmpl = NULL;
+			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+			atomic_dec(&phba->fabric_iocb_count);
+		}
+	} else {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		ret = IOCB_SUCCESS;
+	}
+	return ret;
+}
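+
+/*
+ * Taken together, lpfc_issue_fabric_iocb(), lpfc_cmpl_fabric_iocb() and
+ * lpfc_resume_fabric_iocbs() serialize fabric bound ELS traffic. A
+ * simplified sketch of the flow implemented above:
+ *
+ *	issue:    fabric_iocb_count == 0 and FABRIC_COMANDS_BLOCKED clear
+ *	              -> send immediately (count raised to 1)
+ *	          otherwise
+ *	              -> queue the iocb on phba->fabric_iocb_list
+ *	complete: restore the original iocb_cmpl, drop the count and, if not
+ *	          blocked, resume the next queued fabric iocb
+ *	blocked:  busy/reject completions set FABRIC_COMANDS_BLOCKED; the
+ *	          100 ms fabric_block_timer -> worker thread ->
+ *	          lpfc_unblock_fabric_iocbs() lifts the block
+ */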
+
+/**
+ * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine aborts all the IOCBs associated with a @vport from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @vport from the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+				 list) {
+
+		if (piocb->vport != vport)
+			continue;
+
+		list_move_tail(&piocb->list, &completions);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine aborts all the IOCBs associated with an @ndlp from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @ndlp from the list, sets the
+ * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
+ * associated with the IOCB.
+ **/
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba  *phba = ndlp->phba;
+	struct lpfc_iocbq *tmp_iocb, *piocb;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+				 list) {
+		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
+
+			list_move_tail(&piocb->list, &completions);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the IOCBs currently on the driver internal
+ * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
+ * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
+ * list, removes IOCBs from the list, sets the status field to
+ * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
+ * the IOCB.
+ **/
+void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->fabric_iocb_list, &completions);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+			sglq_entry->ndlp = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	uint16_t lxri = 0;
+
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	unsigned long iflag = 0;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+		if (sglq_entry->sli4_xritag == xri) {
+			list_del(&sglq_entry->list);
+			ndlp = sglq_entry->ndlp;
+			sglq_entry->ndlp = NULL;
+			list_add_tail(&sglq_entry->list,
+				&phba->sli4_hba.lpfc_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+
+			/* Check if TXQ queue needs to be serviced */
+			if (pring->txq_cnt)
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	lxri = lpfc_sli4_xri_inrange(phba, xri);
+	if (lxri == NO_XRI) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hbadisc.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hbadisc.c
new file mode 100644
index 0000000..b507536
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -0,0 +1,6143 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* AlpaArray for assignment of scsid for scan-down and bind_method */
+static uint8_t lpfcAlpaArray[] = {
+	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
+	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
+	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
+	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
+	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
+	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
+	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
+	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
+	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
+	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
+	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
+	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
+	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
+};
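+
+/*
+ * The table above lists the 126 valid arbitrated-loop physical addresses
+ * (AL_PAs); as the comment above notes, a port AL_PA's position in this
+ * table is used to derive the scsid when scan-down binding is in effect.
+ */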
+
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+static void lpfc_disc_flush_list(struct lpfc_vport *vport);
+static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
+
+void
+lpfc_terminate_rport_io(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_hba *phba;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+			printk(KERN_ERR "Cannot find remote node"
+			" to terminate I/O Data x%x\n",
+			rport->port_id);
+		return;
+	}
+
+	phba  = ndlp->phba;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+		"rport terminate: sid:x%x did:x%x flg:x%x",
+		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		lpfc_sli_abort_iocb(ndlp->vport,
+			&phba->sli.ring[phba->sli.fcp_ring],
+			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+	}
+}
+
+/*
+ * This function will be called when dev_loss_tmo fires.
+ */
+void
+lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist * ndlp;
+	struct lpfc_vport *vport;
+	struct lpfc_hba   *phba;
+	struct lpfc_work_evt *evtp;
+	int  put_node;
+	int  put_rport;
+
+	rdata = rport->dd_data;
+	ndlp = rdata->pnode;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	vport = ndlp->vport;
+	phba  = vport->phba;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport devlosscb: sid:x%x did:x%x flg:x%x",
+		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+	/* Don't defer this if we are in the process of deleting the vport
+	 * or unloading the driver. The unload will clean up the node
+	 * appropriately; we just need to clean up the ndlp rport info here.
+	 */
+	if (vport->load_flag & FC_UNLOADING) {
+		put_node = rdata->pnode != NULL;
+		put_rport = ndlp->rport != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		if (put_rport)
+			put_device(&rport->dev);
+		return;
+	}
+
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+		return;
+
+	evtp = &ndlp->dev_loss_evt;
+
+	if (!list_empty(&evtp->evt_listp))
+		return;
+
+	spin_lock_irq(&phba->hbalock);
+	/* We need to hold the node by incrementing the reference
+	 * count until this queued work is done
+	 */
+	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+	if (evtp->evt_arg1) {
+		evtp->evt = LPFC_EVT_DEV_LOSS;
+		list_add_tail(&evtp->evt_listp, &phba->work_list);
+		lpfc_worker_wake_up(phba);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine returns 1 when at least one
+ * remote node, including this @ndlp, is still using the FCF; otherwise, it
+ * returns 0 when no remote node is still using the FCF at the time the
+ * devloss timeout happened to this @ndlp.
+ **/
+static int
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_rport_data *rdata;
+	struct fc_rport   *rport;
+	struct lpfc_vport *vport;
+	struct lpfc_hba   *phba;
+	uint8_t *name;
+	int  put_node;
+	int  put_rport;
+	int warn_on = 0;
+	int fcf_inuse = 0;
+
+	rport = ndlp->rport;
+
+	if (!rport)
+		return fcf_inuse;
+
+	rdata = rport->dd_data;
+	name = (uint8_t *) &ndlp->nlp_portname;
+	vport = ndlp->vport;
+	phba  = vport->phba;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		fcf_inuse = lpfc_fcf_inuse(phba);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport devlosstmo:did:x%x type:x%x id:x%x",
+		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+	/* Don't defer this if we are in the process of deleting the vport
+	 * or unloading the driver. The unload will clean up the node
+	 * appropriately; we just need to clean up the ndlp rport info here.
+	 */
+	if (vport->load_flag & FC_UNLOADING) {
+		if (ndlp->nlp_sid != NLP_NO_SID) {
+			/* flush the target */
+			lpfc_sli_abort_iocb(vport,
+					&phba->sli.ring[phba->sli.fcp_ring],
+					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+		}
+		put_node = rdata->pnode != NULL;
+		put_rport = ndlp->rport != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		if (put_rport)
+			put_device(&rport->dev);
+		return fcf_inuse;
+	}
+
+	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0284 Devloss timeout Ignored on "
+				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+				 "NPort x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID);
+		return fcf_inuse;
+	}
+
+	if (ndlp->nlp_type & NLP_FABRIC) {
+		/* We will clean up these Nodes in linkup */
+		put_node = rdata->pnode != NULL;
+		put_rport = ndlp->rport != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		if (put_rport)
+			put_device(&rport->dev);
+		return fcf_inuse;
+	}
+
+	if (ndlp->nlp_sid != NLP_NO_SID) {
+		warn_on = 1;
+		/* flush the target */
+		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+	}
+
+	if (warn_on) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0203 Devloss timeout on "
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, ndlp->nlp_rpi);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "0204 Devloss timeout on "
+				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+				 "NPort x%06x Data: x%x x%x x%x\n",
+				 *name, *(name+1), *(name+2), *(name+3),
+				 *(name+4), *(name+5), *(name+6), *(name+7),
+				 ndlp->nlp_DID, ndlp->nlp_flag,
+				 ndlp->nlp_state, ndlp->nlp_rpi);
+	}
+
+	put_node = rdata->pnode != NULL;
+	put_rport = ndlp->rport != NULL;
+	rdata->pnode = NULL;
+	ndlp->rport = NULL;
+	if (put_node)
+		lpfc_nlp_put(ndlp);
+	if (put_rport)
+		put_device(&rport->dev);
+
+	if (!(vport->load_flag & FC_UNLOADING) &&
+	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
+	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+	return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking the devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for an SLI4 host. For the devloss
+ * timeout of the last remote node that had been using the FCF, when this
+ * routine is invoked it is guaranteed that no remote node is still using
+ * the FCF. When the devloss timeout fires for the last remote node using
+ * the FCF, if the FIP engine is neither in the FCF table scan process nor
+ * in the roundrobin failover process, the in-use FCF shall be unregistered.
+ * If the FIP engine is in the FCF discovery process, the devloss timeout
+ * state shall be set for either the FCF table scan process or the roundrobin
+ * failover process to unregister the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+				    uint32_t nlp_did)
+{
+	/* If devloss timeout happened to a remote node when FCF had no
+	 * longer been in-use, do nothing.
+	 */
+	if (!fcf_inuse)
+		return;
+
+	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			phba->hba_flag |= HBA_DEVLOSS_TMO;
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2847 Last remote node (x%x) using "
+					"FCF devloss tmo\n", nlp_did);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2868 Devloss tmo to FCF rediscovery "
+					"in progress\n");
+			return;
+		}
+		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2869 Devloss tmo to idle FIP engine, "
+					"unreg in-use FCF and rescan.\n");
+			/* Unregister in-use FCF and rescan */
+			lpfc_unregister_fcf_rescan(phba);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2870 FCF table scan in progress\n");
+		if (phba->hba_flag & FCF_RR_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2871 FLOGI roundrobin FCF failover "
+					"in progress\n");
+	}
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_alloc_fast_evt - Allocates data structure for posting event
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the functions which need to post
+ * events from interrupt context. This function allocates the data
+ * structure required for posting an event. It also keeps track of
+ * the number of pending events and prevents an event storm when there
+ * are too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+	struct lpfc_fast_path_event *ret;
+
+	/* If there are a lot of fast events, do not exhaust memory due to this */
+	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+		return NULL;
+
+	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+			GFP_ATOMIC);
+	if (ret) {
+		atomic_inc(&phba->fast_event_count);
+		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	}
+	return ret;
+}
+
+/**
+ * lpfc_free_fast_evt - Frees event data structure
+ * @phba: Pointer to hba context object.
+ * @evt:  Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+		struct lpfc_fast_path_event *evt) {
+
+	atomic_dec(&phba->fast_event_count);
+	kfree(evt);
+}
+
+/**
+ * lpfc_send_fastpath_evt - Posts events generated from fast path
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the interrupt
+ * context needs to post an event. This function posts the event
+ * to the fc transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+		struct lpfc_work_evt *evtp)
+{
+	unsigned long evt_category, evt_sub_category;
+	struct lpfc_fast_path_event *fast_evt_data;
+	char *evt_data;
+	uint32_t evt_data_size;
+	struct Scsi_Host *shost;
+
+	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+		work_evt);
+
+	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+	evt_sub_category = (unsigned long) fast_evt_data->un.
+			fabric_evt.subcategory;
+	shost = lpfc_shost_from_vport(fast_evt_data->vport);
+	if (evt_category == FC_REG_FABRIC_EVENT) {
+		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+			evt_data = (char *) &fast_evt_data->un.read_check_error;
+			evt_data_size = sizeof(fast_evt_data->un.
+				read_check_error);
+		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+			evt_data = (char *) &fast_evt_data->un.fabric_evt;
+			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+		} else {
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else if (evt_category == FC_REG_SCSI_EVENT) {
+		switch (evt_sub_category) {
+		case LPFC_EVENT_QFULL:
+		case LPFC_EVENT_DEVBSY:
+			evt_data = (char *) &fast_evt_data->un.scsi_evt;
+			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+			break;
+		case LPFC_EVENT_CHECK_COND:
+			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+			evt_data_size =  sizeof(fast_evt_data->un.
+				check_cond_evt);
+			break;
+		case LPFC_EVENT_VARQUEDEPTH:
+			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+			evt_data_size = sizeof(fast_evt_data->un.
+				queue_depth_evt);
+			break;
+		default:
+			lpfc_free_fast_evt(phba, fast_evt_data);
+			return;
+		}
+	} else {
+		lpfc_free_fast_evt(phba, fast_evt_data);
+		return;
+	}
+
+	fc_host_post_vendor_event(shost,
+		fc_get_event_number(),
+		evt_data_size,
+		evt_data,
+		LPFC_NL_VENDOR_ID);
+
+	lpfc_free_fast_evt(phba, fast_evt_data);
+	return;
+}
+
+static void
+lpfc_work_list_done(struct lpfc_hba *phba)
+{
+	struct lpfc_work_evt  *evtp = NULL;
+	struct lpfc_nodelist  *ndlp;
+	int free_evt;
+	int fcf_inuse;
+	uint32_t nlp_did;
+
+	spin_lock_irq(&phba->hbalock);
+	while (!list_empty(&phba->work_list)) {
+		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
+				 evt_listp);
+		spin_unlock_irq(&phba->hbalock);
+		free_evt = 1;
+		switch (evtp->evt) {
+		case LPFC_EVT_ELS_RETRY:
+			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+			lpfc_els_retry_delay_handler(ndlp);
+			free_evt = 0; /* evt is part of ndlp */
+			/* decrement the node reference count held
+			 * for this queued work
+			 */
+			lpfc_nlp_put(ndlp);
+			break;
+		case LPFC_EVT_DEV_LOSS:
+			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
+			free_evt = 0;
+			/* decrement the node reference count held for
+			 * this queued work
+			 */
+			nlp_did = ndlp->nlp_DID;
+			lpfc_nlp_put(ndlp);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_post_dev_loss_tmo_handler(phba,
+								    fcf_inuse,
+								    nlp_did);
+			break;
+		case LPFC_EVT_ONLINE:
+			if (phba->link_state < LPFC_LINK_DOWN)
+				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
+			else
+				*(int *) (evtp->evt_arg1) = 0;
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_OFFLINE_PREP:
+			if (phba->link_state >= LPFC_LINK_DOWN)
+				lpfc_offline_prep(phba);
+			*(int *)(evtp->evt_arg1) = 0;
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_OFFLINE:
+			lpfc_offline(phba);
+			lpfc_sli_brdrestart(phba);
+			*(int *)(evtp->evt_arg1) =
+				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_WARM_START:
+			lpfc_offline(phba);
+			lpfc_reset_barrier(phba);
+			lpfc_sli_brdreset(phba);
+			lpfc_hba_down_post(phba);
+			*(int *)(evtp->evt_arg1) =
+				lpfc_sli_brdready(phba, HS_MBRDY);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_KILL:
+			lpfc_offline(phba);
+			*(int *)(evtp->evt_arg1)
+				= (phba->pport->stopped)
+				        ? 0 : lpfc_sli_brdkill(phba);
+			lpfc_unblock_mgmt_io(phba);
+			complete((struct completion *)(evtp->evt_arg2));
+			break;
+		case LPFC_EVT_FASTPATH_MGMT_EVT:
+			lpfc_send_fastpath_evt(phba, evtp);
+			free_evt = 0;
+			break;
+		case LPFC_EVT_RESET_HBA:
+			if (!(phba->pport->load_flag & FC_UNLOADING))
+				lpfc_reset_hba(phba);
+			break;
+		}
+		if (free_evt)
+			kfree(evtp);
+		spin_lock_irq(&phba->hbalock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+}
+
+static void
+lpfc_work_done(struct lpfc_hba *phba)
+{
+	struct lpfc_sli_ring *pring;
+	uint32_t ha_copy, status, control, work_port_events;
+	struct lpfc_vport **vports;
+	struct lpfc_vport *vport;
+	int i;
+
+	spin_lock_irq(&phba->hbalock);
+	ha_copy = phba->work_ha;
+	phba->work_ha = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* First, try to post the next mailbox command to SLI4 device */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+		lpfc_sli4_post_async_mbox(phba);
+
+	if (ha_copy & HA_ERATT)
+		/* Handle the error attention event */
+		lpfc_handle_eratt(phba);
+
+	if (ha_copy & HA_MBATT)
+		lpfc_sli_handle_mb_event(phba);
+
+	if (ha_copy & HA_LATT)
+		lpfc_handle_latt(phba);
+
+	/* Process SLI4 events */
+	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		if (phba->hba_flag & HBA_RRQ_ACTIVE)
+			lpfc_handle_rrq_active(phba);
+		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+			lpfc_sli4_fcp_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+			lpfc_sli4_els_xri_abort_event_proc(phba);
+		if (phba->hba_flag & ASYNC_EVENT)
+			lpfc_sli4_async_event_proc(phba);
+		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
+			lpfc_sli4_fcf_redisc_event_proc(phba);
+	}
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports; i++) {
+			/*
+			 * We could have no vports in array if unloading, so if
+			 * this happens then just use the pport
+			 */
+			if (vports[i] == NULL && i == 0)
+				vport = phba->pport;
+			else
+				vport = vports[i];
+			if (vport == NULL)
+				break;
+			spin_lock_irq(&vport->work_port_lock);
+			work_port_events = vport->work_port_events;
+			vport->work_port_events &= ~work_port_events;
+			spin_unlock_irq(&vport->work_port_lock);
+			if (work_port_events & WORKER_DISC_TMO)
+				lpfc_disc_timeout_handler(vport);
+			if (work_port_events & WORKER_ELS_TMO)
+				lpfc_els_timeout_handler(vport);
+			if (work_port_events & WORKER_HB_TMO)
+				lpfc_hb_timeout_handler(phba);
+			if (work_port_events & WORKER_MBOX_TMO)
+				lpfc_mbox_timeout_handler(phba);
+			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+				lpfc_unblock_fabric_iocbs(phba);
+			if (work_port_events & WORKER_FDMI_TMO)
+				lpfc_fdmi_timeout_handler(vport);
+			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+				lpfc_ramp_down_queue_handler(phba);
+			if (work_port_events & WORKER_RAMP_UP_QUEUE)
+				lpfc_ramp_up_queue_handler(phba);
+			if (work_port_events & WORKER_DELAYED_DISC_TMO)
+				lpfc_delayed_disc_timeout_handler(vport);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	pring = &phba->sli.ring[LPFC_ELS_RING];
+	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
+	status >>= (4*LPFC_ELS_RING);
+	if ((status & HA_RXMASK) ||
+	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
+	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		} else {
+			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+			lpfc_sli_handle_slow_ring_event(phba, pring,
+							(status &
+							 HA_RXMASK));
+		}
+		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
+			lpfc_drain_txq(phba);
+		/*
+		 * Turn on Ring interrupts
+		 */
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			spin_lock_irq(&phba->hbalock);
+			control = readl(phba->HCregaddr);
+			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Enable ring: cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+
+				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+				writel(control, phba->HCregaddr);
+				readl(phba->HCregaddr); /* flush */
+			} else {
+				lpfc_debugfs_slow_ring_trc(phba,
+					"WRK Ring ok:     cntl:x%x hacopy:x%x",
+					control, ha_copy, 0);
+			}
+			spin_unlock_irq(&phba->hbalock);
+		}
+	}
+	lpfc_work_list_done(phba);
+}
+
+int
+lpfc_do_work(void *p)
+{
+	struct lpfc_hba *phba = p;
+	int rc;
+
+	set_user_nice(current, -20);
+	phba->data_flags = 0;
+
+	while (!kthread_should_stop()) {
+		/* wait and check worker queue activities */
+		rc = wait_event_interruptible(phba->work_waitq,
+					(test_and_clear_bit(LPFC_DATA_READY,
+							    &phba->data_flags)
+					 || kthread_should_stop()));
+		/* Signal wakeup shall terminate the worker thread */
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					"0433 Wakeup on signal: rc=x%x\n", rc);
+			break;
+		}
+
+		/* Attend pending lpfc data processing */
+		lpfc_work_done(phba);
+	}
+	phba->worker_thread = NULL;
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0432 Worker thread stopped.\n");
+	return 0;
+}
+
+/*
+ * This is only called to handle FC worker events. Since this is a rare
+ * occurrence, we allocate a struct lpfc_work_evt structure here instead of
+ * embedding it in the IOCB.
+ */
+int
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
+		      uint32_t evt)
+{
+	struct lpfc_work_evt  *evtp;
+	unsigned long flags;
+
+	/*
+	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
+	 * be queued to the worker thread for processing
+	 */
+	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
+	if (!evtp)
+		return 0;
+
+	evtp->evt_arg1  = arg1;
+	evtp->evt_arg2  = arg2;
+	evtp->evt       = evt;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	lpfc_worker_wake_up(phba);
+
+	return 1;
+}
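+
+/*
+ * A hypothetical usage sketch (not taken from the driver): a caller that
+ * cannot reset the HBA from its own context can hand the work to the
+ * worker thread, which services it in lpfc_work_list_done():
+ *
+ *	if (!lpfc_workq_post_event(phba, NULL, NULL, LPFC_EVT_RESET_HBA))
+ *		... allocation failed, the event was not queued ...
+ */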
+
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int  rc;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+			((vport->port_type == LPFC_NPIV_PORT) &&
+			(ndlp->nlp_DID == NameServer_DID)))
+			lpfc_unreg_rpi(vport, ndlp);
+
+		/* Leave Fabric nodes alone on link down */
+		if ((phba->sli_rev < LPFC_SLI_REV4) &&
+		    (!remove && ndlp->nlp_type & NLP_FABRIC))
+			continue;
+		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+					     remove
+					     ? NLP_EVT_DEVICE_RM
+					     : NLP_EVT_DEVICE_RECOVERY);
+	}
+	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+		lpfc_mbx_unreg_vpi(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
+void
+lpfc_port_link_failure(struct lpfc_vport *vport)
+{
+	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+
+	/* Cleanup any outstanding received buffers */
+	lpfc_cleanup_rcv_buffers(vport);
+
+	/* Cleanup any outstanding RSCN activity */
+	lpfc_els_flush_rscn(vport);
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_cmd(vport);
+
+	lpfc_cleanup_rpis(vport, 0);
+
+	/* Turn off discovery timer if its running */
+	lpfc_can_disctmo(vport);
+}
+
+void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Link Down:       state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	lpfc_port_link_failure(vport);
+
+	/* Stop delayed Nport discovery */
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_DISC_DELAYED;
+	spin_unlock_irq(shost->host_lock);
+	del_timer_sync(&vport->delayed_disc_tmo);
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_vport **vports;
+	LPFC_MBOXQ_t          *mb;
+	int i;
+
+	if (phba->link_state == LPFC_LINK_DOWN)
+		return 0;
+
+	/* Block all SCSI stack I/Os */
+	lpfc_scsi_dev_block(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	spin_unlock_irq(&phba->hbalock);
+	if (phba->link_state > LPFC_LINK_DOWN) {
+		phba->link_state = LPFC_LINK_DOWN;
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag &= ~FC_LBIT;
+		spin_unlock_irq(shost->host_lock);
+	}
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			/* Issue a LINK DOWN event to all nodes */
+			lpfc_linkdown_port(vports[i]);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	/* Clean up any firmware default rpi's */
+	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mb) {
+		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
+		mb->vport = vport;
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+		    == MBX_NOT_FINISHED) {
+			mempool_free(mb, phba->mbox_mem_pool);
+		}
+	}
+
+	/* Setup myDID for link up if we are in pt2pt mode */
+	if (phba->pport->fc_flag & FC_PT2PT) {
+		phba->pport->fc_myDID = 0;
+		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mb) {
+			lpfc_config_link(phba, mb);
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mb->vport = vport;
+			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
+			    == MBX_NOT_FINISHED) {
+				mempool_free(mb, phba->mbox_mem_pool);
+			}
+		}
+		spin_lock_irq(shost->host_lock);
+		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	return 0;
+}
+
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		if (ndlp->nlp_type & NLP_FABRIC) {
+			/* On Linkup it's safe to clean up the ndlp
+			 * from Fabric connections.
+			 */
+			if (ndlp->nlp_DID != Fabric_DID)
+				lpfc_unreg_rpi(vport, ndlp);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+			/* Fail outstanding IO now since device is
+			 * marked for PLOGI.
+			 */
+			lpfc_unreg_rpi(vport, ndlp);
+		}
+	}
+}
+
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	if ((vport->load_flag & FC_UNLOADING) != 0)
+		return;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Link Up:         top:x%x speed:x%x flg:x%x",
+		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+	/* If NPIV is not enabled, only bring the physical port up */
+	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+		(vport != phba->pport))
+		return;
+
+	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+	vport->fc_flag |= FC_NDISC_ACTIVE;
+	vport->fc_ns_retry = 0;
+	spin_unlock_irq(shost->host_lock);
+
+	if (vport->fc_flag & FC_LBIT)
+		lpfc_linkup_cleanup_nodes(vport);
+
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	lpfc_cleanup_wt_rrqs(phba);
+	phba->link_state = LPFC_LINK_UP;
+
+	/* Unblock fabric iocbs if they are blocked */
+	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+	del_timer_sync(&phba->fabric_block_timer);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_linkup_port(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    (phba->sli_rev < LPFC_SLI_REV4))
+		lpfc_issue_clear_la(phba, phba->pport);
+
+	return 0;
+}
+
+/*
+ * This routine handles processing a CLEAR_LA mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+static void
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_sli   *psli = &phba->sli;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t control;
+
+	/* Since we don't do discovery right now, turn these off here */
+	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+
+	/* Check for error */
+	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
+		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0320 CLEAR_LA mbxStatus error x%x hba "
+				 "state x%x\n",
+				 mb->mbxStatus, vport->port_state);
+		phba->link_state = LPFC_HBA_ERROR;
+		goto out;
+	}
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT)
+		phba->link_state = LPFC_HBA_READY;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+
+out:
+	/* Device Discovery completes */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0225 Device Discovery completes\n");
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_can_disctmo(vport);
+
+	/* turn on Link Attention interrupts */
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+
+static void
+lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+
+	if (pmb->u.mb.mbxStatus)
+		goto out;
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* don't perform discovery for SLI4 loopback diagnostic test */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    !(phba->hba_flag & HBA_FCOE_MODE) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE))
+		return;
+
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
+	    vport->fc_flag & FC_PUBLIC_LOOP &&
+	    !(vport->fc_flag & FC_LBIT)) {
+			/* Need to wait for FAN - use discovery timer
+			 * for timeout.  port_state is identically
+			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
+			 */
+			lpfc_set_disctmo(vport);
+			return;
+	}
+
+	/* Start discovery by sending a FLOGI. port_state is identically
+	 * LPFC_FLOGI while waiting for FLOGI cmpl
+	 */
+	if (vport->port_state != LPFC_FLOGI)
+		lpfc_initial_flogi(vport);
+	return;
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "0306 CONFIG_LINK mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 pmb->u.mb.mbxStatus, vport->port_state);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	lpfc_linkdown(phba);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0200 CONFIG_LINK bad hba state x%x\n",
+			 vport->port_state);
+
+	lpfc_issue_clear_la(phba, vport);
+	return;
+}
+
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba pointer to the struct lpfc_hba for this port.
+ * This fucnction resets the round robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *next_fcf_pri;
+	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+				&phba->fcf.fcf_pri_list, list) {
+		list_del_init(&fcf_pri->list);
+		fcf_pri->fcf_rec.flag = 0;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2017 REG_FCFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		goto fail_out;
+	}
+
+	/* Start FCoE discovery by sending a FLOGI. */
+	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+	/* Set the FCFI registered flag */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* If there is a pending FCoE event, restart FCF table scan. */
+	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+		goto fail_out;
+
+	/* Mark successful completion of FCF table scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+	phba->hba_flag &= ~FCF_TS_INPROG;
+	if (vport->port_state != LPFC_FLOGI) {
+		phba->hba_flag |= FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_issue_init_vfi(vport);
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto out;
+
+fail_out:
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCF_RR_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+out:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric names match.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
+		return 0;
+	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
+		return 0;
+	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
+		return 0;
+	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
+		return 0;
+	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
+		return 0;
+	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
+		return 0;
+	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
+		return 0;
+	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_sw_name_match - Check if the fcf switch names match.
+ * @sw_name: pointer to switch name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's switch name with the provided
+ * switch name. If the switch names are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
+{
+	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
+		return 0;
+	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
+		return 0;
+	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
+		return 0;
+	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
+		return 0;
+	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
+		return 0;
+	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
+		return 0;
+	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
+		return 0;
+	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac addresses match.
+ * @mac_addr: pointer to mac address.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's mac address with the HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1, else it returns 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
+{
+	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
+		return 0;
+	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
+		return 0;
+	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
+		return 0;
+	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
+		return 0;
+	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
+		return 0;
+	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
+		return 0;
+	return 1;
+}
+
+static bool
+lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
+{
+	return (curr_vlan_id == new_vlan_id);
+}
+
+/**
+ * __lpfc_update_fcf_record_pri - Update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+				 struct fcf_record *new_fcf_record
+				 )
+{
+	struct lpfc_fcf_pri *fcf_pri;
+
+	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	fcf_pri->fcf_rec.fcf_index = fcf_index;
+	/* FCF record priority */
+	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the FCF
+ * record to lpfc_hba data structure.
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
+		     struct fcf_record *new_fcf_record)
+{
+	/* Fabric name */
+	fcf_rec->fabric_name[0] =
+		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+	fcf_rec->fabric_name[1] =
+		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+	fcf_rec->fabric_name[2] =
+		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+	fcf_rec->fabric_name[3] =
+		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+	fcf_rec->fabric_name[4] =
+		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+	fcf_rec->fabric_name[5] =
+		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+	fcf_rec->fabric_name[6] =
+		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+	fcf_rec->fabric_name[7] =
+		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+	/* Mac address */
+	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+	/* FCF record index */
+	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	/* FCF record priority */
+	fcf_rec->priority = new_fcf_record->fip_priority;
+	/* Switch name */
+	fcf_rec->switch_name[0] =
+		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
+	fcf_rec->switch_name[1] =
+		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
+	fcf_rec->switch_name[2] =
+		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
+	fcf_rec->switch_name[3] =
+		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
+	fcf_rec->switch_name[4] =
+		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
+	fcf_rec->switch_name[5] =
+		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
+	fcf_rec->switch_name[6] =
+		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
+	fcf_rec->switch_name[7] =
+		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
+}
+
+/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to hba fcf record.
+ * @addr_mode: address mode to be set to the driver fcf record.
+ * @vlan_id: vlan tag to be set to the driver fcf record.
+ * @flag: flag bits to be set to the driver fcf record.
+ *
+ * This routine updates the driver FCF record from the new HBA FCF record
+ * together with the address mode, vlan_id, and other information. This
+ * routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
+		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
+		       uint16_t vlan_id, uint32_t flag)
+{
+	/* Copy the fields from the HBA's FCF record */
+	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
+	/* Update other fields of driver FCF record */
+	fcf_rec->addr_mode = addr_mode;
+	fcf_rec->vlan_id = vlan_id;
+	fcf_rec->flag |= (flag | RECORD_VALID);
+	__lpfc_update_fcf_record_pri(phba,
+		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+				 new_fcf_record);
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *fcf_mbxq;
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* If the FCF is not available do nothing. */
+	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* The FCF is already registered, start discovery */
+	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		if (phba->pport->port_state != LPFC_FLOGI) {
+			phba->hba_flag |= FCF_RR_INPROG;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_initial_flogi(phba->pport);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!fcf_mbxq) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	lpfc_reg_fcfi(phba, fcf_mbxq);
+	fcf_mbxq->vport = phba->pport;
+	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		spin_unlock_irq(&phba->hbalock);
+		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record is used by the boot bios.
+ * @addr_mode: The address mode to be used by this FCF
+ * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
+ *
+ * This routine compares the fcf record with the connect list obtained from
+ * the config region to decide if this FCF can be used for SAN discovery. It
+ * returns 1 if this record can be used for SAN discovery, else it returns
+ * zero. If this FCF record can be used for SAN discovery, boot_flag will
+ * indicate if this FCF is used by the boot bios and addr_mode will indicate
+ * the addressing mode to be used for this FCF when the function returns.
+ * If the FCF record needs to be used with a particular vlan id, the vlan is
+ * set in vlan_id on return of the function. If no VLAN tagging needs to be
+ * used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+			struct fcf_record *new_fcf_record,
+			uint32_t *boot_flag, uint32_t *addr_mode,
+			uint16_t *vlan_id)
+{
+	struct lpfc_fcf_conn_entry *conn_entry;
+	int i, j, fcf_vlan_id = 0;
+
+	/* Find the lowest VLAN id in the FCF record */
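+	/* Byte i, bit j of the 512-byte vlan_bitmap maps to VLAN ID i * 8 + j,
+	 * so the first set bit found below is the lowest advertised VLAN ID.
+	 */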
+	for (i = 0; i < 512; i++) {
+		if (new_fcf_record->vlan_bitmap[i]) {
+			fcf_vlan_id = i * 8;
+			j = 0;
+			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
+				j++;
+				fcf_vlan_id++;
+			}
+			break;
+		}
+	}
+
+	/* If FCF not available return 0 */
+	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
+		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
+		return 0;
+
+	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		if (phba->valid_vlan)
+			*vlan_id = phba->vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+		return 1;
+	}
+
+	/*
+	 * If there are no FCF connection table entries, the driver connects
+	 * to all FCFs.
+	 */
+	if (list_empty(&phba->fcf_conn_rec_list)) {
+		*boot_flag = 0;
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+			new_fcf_record);
+
+		/*
+		 * When there are no FCF connect entries, use driver's default
+		 * addressing mode - FPMA.
+		 */
+		if (*addr_mode & LPFC_FCF_FPMA)
+			*addr_mode = LPFC_FCF_FPMA;
+
+		/* If the FCF record reports a vlan id, use that vlan id */
+		if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+		return 1;
+	}
+
+	list_for_each_entry(conn_entry,
+			    &phba->fcf_conn_rec_list, list) {
+		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+			continue;
+
+		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+					     new_fcf_record))
+			continue;
+		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
+			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
+					    new_fcf_record))
+			continue;
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+			/*
+			 * If the vlan bit map does not have the bit set for the
+			 * vlan id to be used, then it is not a match.
+			 */
+			if (!(new_fcf_record->vlan_bitmap
+				[conn_entry->conn_rec.vlan_tag / 8] &
+				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
+				continue;
+		}
+
+		/*
+		 * If connection record does not support any addressing mode,
+		 * skip the FCF record.
+		 */
+		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
+			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
+			continue;
+
+		/*
+		 * Check if the connection record specifies a required
+		 * addressing mode.
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+			/*
+			 * If SPMA is required but the FCF does not support it, continue.
+			 */
+			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+					new_fcf_record) & LPFC_FCF_SPMA))
+				continue;
+
+			/*
+			 * If FPMA is required but the FCF does not support it, continue.
+			 */
+			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+				!(bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record) & LPFC_FCF_FPMA))
+				continue;
+		}
+
+		/*
+		 * This fcf record matches filtering criteria.
+		 */
+		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+			*boot_flag = 1;
+		else
+			*boot_flag = 0;
+
+		/*
+		 * If user did not specify any addressing mode, or if the
+		 * preferred addressing mode specified by user is not supported
+		 * by FCF, allow fabric to pick the addressing mode.
+		 */
+		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+				new_fcf_record);
+		/*
+		 * If the user specified a required address mode, assign that
+		 * address mode
+		 */
+		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+			*addr_mode = (conn_entry->conn_rec.flags &
+				FCFCNCT_AM_SPMA) ?
+				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+		/*
+		 * If the user specified a preferred address mode, use the
+		 * addr mode only if FCF support the addr_mode.
+		 */
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_SPMA))
+				*addr_mode = LPFC_FCF_SPMA;
+		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+			(*addr_mode & LPFC_FCF_FPMA))
+				*addr_mode = LPFC_FCF_FPMA;
+
+		/* If matching connect list has a vlan id, use it */
+		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+			*vlan_id = conn_entry->conn_rec.vlan_tag;
+		/*
+		 * If no vlan id is specified in connect list, use the vlan id
+		 * in the FCF record
+		 */
+		else if (fcf_vlan_id)
+			*vlan_id = fcf_vlan_id;
+		else
+			*vlan_id = LPFC_FCOE_NULL_VID;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
+ *
+ * This function checks if there is any fcoe event pending while the driver
+ * is scanning FCF entries. If there is any pending event, it will restart the
+ * FCF scanning and return 1, else return 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+	/*
+	 * If the Link is up and no FCoE events while in the
+	 * FCF discovery, no need to restart FCF discovery.
+	 */
+	if ((phba->link_state  >= LPFC_LINK_UP) &&
+	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+		return 0;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2768 Pending link or FCF event during current "
+			"handling of the previous event: link_state:x%x, "
+			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+			phba->fcoe_eventtag);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+	spin_unlock_irq(&phba->hbalock);
+
+	if (phba->link_state >= LPFC_LINK_UP) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2780 Restart FCF table scan due to "
+				"pending FCF event:evt_tag_at_scan:x%x, "
+				"evt_tag_current:x%x\n",
+				phba->fcoe_eventtag_at_fcf_scan,
+				phba->fcoe_eventtag);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	} else {
+		/*
+		 * Do not continue FCF discovery and clear FCF_TS_INPROG
+		 * flag
+		 */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2833 Stop FCF discovery process due to link "
+				"state change (x%x)\n", phba->link_state);
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
+		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Unregister the currently registered FCF if required */
+	if (unreg_fcf) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_unregister_fcf(phba);
+	}
+	return 1;
+}
+
+/**
+ * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_cnt: number of eligible fcf record seen so far.
+ *
+ * This function makes a running random selection decision on which FCF
+ * record to use through a sequence of @fcf_cnt eligible FCF records with
+ * equal probability. To perform integer manipulation of random numbers with
+ * size uint32_t, the lower 16 bits of the 32-bit random number returned
+ * from random32() are taken as the generated random number.
+ *
+ * Returns true when the newly read FCF record should be chosen; otherwise,
+ * returns false when the previously chosen FCF record should be kept.
+ **/
+static bool
+lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
+{
+	uint32_t rand_num;
+
+	/* Get 16-bit uniform random number */
+	rand_num = (0xFFFF & random32());
+
+	/* Decision with probability 1/fcf_cnt */
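+	/* rand_num is uniform over [0, 0xFFFF], so the newly read record is
+	 * chosen when fcf_cnt * rand_num < 0xFFFF, i.e. with probability of
+	 * roughly 1/fcf_cnt (a reservoir-sampling style choice over the scan).
+	 */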
+	if ((fcf_cnt * rand_num) < 0xFFFF)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
+ *
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessary error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns a pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory on success, otherwise NULL.
+ */
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+			     uint16_t *next_fcf_index)
+{
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	struct lpfc_mbx_sge sge;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct fcf_record *new_fcf_record;
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	if (unlikely(!mboxq->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2524 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		return NULL;
+	}
+	virt_addr = mboxq->sge_array->addr[0];
+
+	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status) {
+		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2726 READ_FCF_RECORD Indicates empty "
+					"FCF table.\n");
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2521 READ_FCF_RECORD mailbox failed "
+					"with status x%x add_status x%x, "
+					"mbx\n", shdr_status, shdr_add_status);
+		return NULL;
+	}
+
+	/* Interpreting the returned information of the FCF record */
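+	/* Note: lpfc_sli_pcimem_bcopy() is called with identical source and
+	 * destination here, which performs an in-place little-endian to
+	 * host-endian conversion of the returned data.
+	 */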
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+			      sizeof(struct lpfc_mbx_read_fcf_tbl));
+	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+	new_fcf_record = (struct fcf_record *)(virt_addr +
+			  sizeof(struct lpfc_mbx_read_fcf_tbl));
+	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+				offsetof(struct fcf_record, vlan_bitmap));
+	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
+	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
+
+	return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if LOG_FIP logging is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+			      struct fcf_record *fcf_record,
+			      uint16_t vlan_id,
+			      uint16_t next_fcf_index)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2764 READ_FCF_RECORD:\n"
+			"\tFCF_Index     : x%x\n"
+			"\tFCF_Avail     : x%x\n"
+			"\tFCF_Valid     : x%x\n"
+			"\tFIP_Priority  : x%x\n"
+			"\tMAC_Provider  : x%x\n"
+			"\tLowest VLANID : x%x\n"
+			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
+			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tNext_FCF_Index: x%x\n",
+			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			fcf_record->fip_priority,
+			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+			vlan_id,
+			bf_get(lpfc_fcf_record_mac_0, fcf_record),
+			bf_get(lpfc_fcf_record_mac_1, fcf_record),
+			bf_get(lpfc_fcf_record_mac_2, fcf_record),
+			bf_get(lpfc_fcf_record_mac_3, fcf_record),
+			bf_get(lpfc_fcf_record_mac_4, fcf_record),
+			bf_get(lpfc_fcf_record_mac_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+			next_fcf_index);
+}
+
+/**
+ * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to an existing FCF record.
+ * @new_fcf_record: pointer to a new FCF record.
+ * @new_vlan_id: vlan id from the new FCF record.
+ *
+ * This function performs matching test of a new FCF record against an existing
+ * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
+ * will not be used as part of the FCF record matching criteria.
+ *
+ * Returns true if all the fields match, otherwise returns false.
+ */
+static bool
+lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
+			   struct lpfc_fcf_rec *fcf_rec,
+			   struct fcf_record *new_fcf_record,
+			   uint16_t new_vlan_id)
+{
+	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
+		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
+			return false;
+	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
+		return false;
+	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
+		return false;
+	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
+		return false;
+	if (fcf_rec->priority != new_fcf_record->fip_priority)
+		return false;
+	return true;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin fcf failover to the next fcf index.
+ * When this function is invoked, there will be a current fcf registered
+ * for flogi.
+ * Return: 0 to continue retrying flogi on the currently registered fcf;
+ *         1 to stop flogi on the currently registered fcf.
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+	struct lpfc_hba *phba = vport->phba;
+	int rc;
+
+	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2872 Devloss tmo with no eligible "
+					"FCF, unregister in-use FCF (x%x) "
+					"and rescan FCF table\n",
+					phba->fcf.current_rec.fcf_indx);
+			lpfc_unregister_fcf_rescan(phba);
+			goto stop_flogi_current_fcf;
+		}
+		/* Mark the end to FLOGI roundrobin failover */
+		phba->hba_flag &= ~FCF_RR_INPROG;
+		/* Allow action to new fcf asynchronous event */
+		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2865 No FCF available, stop roundrobin FCF "
+				"failover and change port state:x%x/x%x\n",
+				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		goto stop_flogi_current_fcf;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+				"2794 Try FLOGI roundrobin FCF failover to "
+				"(x%x)\n", fcf_index);
+		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+		if (rc)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2761 FLOGI roundrobin FCF failover "
+					"failed (rc:x%x) to read FCF (x%x)\n",
+					rc, phba->fcf.current_rec.fcf_indx);
+		else
+			goto stop_flogi_current_fcf;
+	}
+	return 0;
+
+stop_flogi_current_fcf:
+	lpfc_can_disctmo(vport);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to delete
+ * This routine checks the on-list flag of the fcf_index to be deleted.
+ * If it is on the list then it is removed from the list, and the flag
+ * is cleared. This routine grabs the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+			uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *new_fcf_pri;
+
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+		"3058 deleting idx x%x pri x%x flg x%x\n",
+		fcf_index, new_fcf_pri->fcf_rec.priority,
+		 new_fcf_pri->fcf_rec.flag);
+	spin_lock_irq(&phba->hbalock);
+	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+		if (phba->fcf.current_rec.priority ==
+				new_fcf_pri->fcf_rec.priority)
+			phba->fcf.eligible_fcf_cnt--;
+		list_del_init(&new_fcf_pri->list);
+		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to update
+ * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
+ * flag so that the round robin selection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason this flag is cleared before
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *new_fcf_pri;
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	spin_lock_irq(&phba->hbalock);
+	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to add
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * returns:
+ * 0=success 1=failure
+ **/
+int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+	struct fcf_record *new_fcf_record)
+{
+	uint16_t current_fcf_pri;
+	uint16_t last_index;
+	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *next_fcf_pri;
+	struct lpfc_fcf_pri *new_fcf_pri;
+	int ret;
+
+	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+		"3059 adding idx x%x pri x%x flg x%x\n",
+		fcf_index, new_fcf_record->fip_priority,
+		 new_fcf_pri->fcf_rec.flag);
+	spin_lock_irq(&phba->hbalock);
+	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+		list_del_init(&new_fcf_pri->list);
+	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+	if (list_empty(&phba->fcf.fcf_pri_list)) {
+		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+		ret = lpfc_sli4_fcf_rr_index_set(phba,
+				new_fcf_pri->fcf_rec.fcf_index);
+		goto out;
+	}
+
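+	/* The roundrobin bmask is expected to hold only FCFs at a single
+	 * priority level, so the priority of any set bit is the level
+	 * currently used for roundrobin selection.
+	 */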
+	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+				LPFC_SLI4_FCF_TBL_INDX_MAX);
+	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		ret = 0; /* Empty rr list */
+		goto out;
+	}
+	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+	if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
+		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+		if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
+			memset(phba->fcf.fcf_rr_bmask, 0,
+				sizeof(*phba->fcf.fcf_rr_bmask));
+			/* fcfs_at_this_priority_level = 1; */
+			phba->fcf.eligible_fcf_cnt = 1;
+		} else
+			/* fcfs_at_this_priority_level++; */
+			phba->fcf.eligible_fcf_cnt++;
+		ret = lpfc_sli4_fcf_rr_index_set(phba,
+				new_fcf_pri->fcf_rec.fcf_index);
+		goto out;
+	}
+
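+	/* fcf_pri_list is kept sorted by ascending fip_priority value (a lower
+	 * value is preferred); insert the new entry ahead of the first entry
+	 * with an equal or larger priority value.
+	 */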
+	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+				&phba->fcf.fcf_pri_list, list) {
+		if (new_fcf_pri->fcf_rec.priority <=
+				fcf_pri->fcf_rec.priority) {
+			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+				list_add(&new_fcf_pri->list,
+						&phba->fcf.fcf_pri_list);
+			else
+				list_add(&new_fcf_pri->list,
+					 &((struct lpfc_fcf_pri *)
+					fcf_pri->list.prev)->list);
+			ret = 0;
+			goto out;
+		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+			|| new_fcf_pri->fcf_rec.priority <
+				next_fcf_pri->fcf_rec.priority) {
+			list_add(&new_fcf_pri->list, &fcf_pri->list);
+			ret = 0;
+			goto out;
+		}
+		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+			continue;
+
+	}
+	ret = 1;
+out:
+	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
+	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+	spin_unlock_irq(&phba->hbalock);
+	return ret;
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kick-starts
+ * discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	uint16_t vlan_id;
+	uint32_t seed;
+	bool select_new_fcf;
+	int rc;
+
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2765 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		/* Let next new FCF event trigger fast failover */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Check the FCF record against the connection list */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	/*
+	 * If the fcf record does not match with connect list entries
+	 * read the next entry; otherwise, this is an eligible FCF
+	 * record for roundrobin FCF failover.
+	 */
+	if (!rc) {
+		lpfc_sli4_fcf_pri_list_del(phba,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2781 FCF (x%x) failed connection "
+				"list check: (x%x/x%x)\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_avail,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record));
+		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
+		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+			    phba->fcf.current_rec.fcf_indx) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2862 FCF (x%x) matches property "
+					"of in-use FCF (x%x)\n",
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
+				goto read_next_fcf;
+			}
+			/*
+			 * In case the current in-use FCF record becomes
+			 * invalid/unavailable during FCF discovery that
+			 * was not triggered by fast FCF failover process,
+			 * treat it as fast FCF failover.
+			 */
+			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
+			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+						"2835 Invalid in-use FCF "
+						"(x%x), enter FCF failover "
+						"table scan.\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli4_mbox_cmd_free(phba, mboxq);
+				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						LPFC_FCOE_FCF_GET_FIRST);
+				return;
+			}
+		}
+		goto read_next_fcf;
+	} else {
+		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+							new_fcf_record);
+		if (rc)
+			goto read_next_fcf;
+	}
+
+	/*
+	 * If this is not the first FCF discovery of the HBA, use last
+	 * FCF record for the discovery. The conditions for a rescan to
+	 * match the in-use FCF record are: fabric name, switch name, mac
+	 * address, and vlan_id.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->fcf.fcf_flag & FCF_IN_USE) {
+		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+		    new_fcf_record, vlan_id)) {
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+			    phba->fcf.current_rec.fcf_indx) {
+				phba->fcf.fcf_flag |= FCF_AVAILABLE;
+				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+					/* Stop FCF redisc wait timer */
+					__lpfc_sli4_stop_fcf_redisc_wait_timer(
+									phba);
+				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+					/* Fast failover, mark completed */
+					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"2836 New FCF matches in-use "
+						"FCF (x%x)\n",
+						phba->fcf.current_rec.fcf_indx);
+				goto out;
+			} else
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2863 New FCF (x%x) matches "
+					"property of in-use FCF (x%x)\n",
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
+		}
+		/*
+		 * Read the next FCF record from the HBA, searching for a match
+		 * with the in-use record, only if not during the fast failover
+		 * period. In case of the fast failover period, it shall try to
+		 * determine whether the FCF record just read should be the
+		 * next candidate.
+		 */
+		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+	}
+	/*
+	 * Update on failover FCF record only if it's in FCF fast-failover
+	 * period; otherwise, update on current FCF record.
+	 */
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+		fcf_rec = &phba->fcf.failover_rec;
+	else
+		fcf_rec = &phba->fcf.current_rec;
+
+	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+		/*
+		 * If the driver FCF record does not have boot flag
+		 * set and new hba fcf record has boot flag set, use
+		 * the new hba fcf record.
+		 */
+		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
+			/* Choose this FCF record */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2837 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					new_fcf_record));
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, BOOT_ENABLE);
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+		/*
+		 * If the driver FCF record has boot flag set and the
+		 * new hba FCF record does not have boot flag, read
+		 * the next FCF record.
+		 */
+		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
+			spin_unlock_irq(&phba->hbalock);
+			goto read_next_fcf;
+		}
+		/*
+		 * If the new hba FCF record has lower priority value
+		 * than the driver FCF record, use the new record.
+		 */
+		if (new_fcf_record->fip_priority < fcf_rec->priority) {
+			/* Choose the new FCF record with lower priority */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2838 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					addr_mode, vlan_id, 0);
+			/* Reset running random FCF selection count */
+			phba->fcf.eligible_fcf_cnt = 1;
+		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
+			/* Update running random FCF selection count */
+			phba->fcf.eligible_fcf_cnt++;
+			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
+						phba->fcf.eligible_fcf_cnt);
+			if (select_new_fcf) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2839 Update current FCF record "
+					"(x%x) with new FCF record (x%x)\n",
+					fcf_rec->fcf_indx,
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record));
+				/* Choose the new FCF by random selection */
+				__lpfc_update_fcf_record(phba, fcf_rec,
+							 new_fcf_record,
+							 addr_mode, vlan_id, 0);
+			}
+		}
+		spin_unlock_irq(&phba->hbalock);
+		goto read_next_fcf;
+	}
+	/*
+	 * This is the first suitable FCF record, choose this record for
+	 * initial best-fit FCF.
+	 */
+	if (fcf_rec) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2840 Update initial FCF candidate "
+				"with FCF (x%x)\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record));
+		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+					 addr_mode, vlan_id, (boot_flag ?
+					 BOOT_ENABLE : 0));
+		phba->fcf.fcf_flag |= FCF_AVAILABLE;
+		/* Setup initial running random FCF selection count */
+		phba->fcf.eligible_fcf_cnt = 1;
+		/* Seeding the random number generator for random selection */
+		seed = (uint32_t)(0xFFFFFFFF & jiffies);
+		srandom32(seed);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto read_next_fcf;
+
+read_next_fcf:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
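+	/* A next index of LPFC_FCOE_FCF_NEXT_NONE or 0 indicates that the end
+	 * of the FCF table has been reached.
+	 */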
+	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
+		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+			/*
+			 * Case of FCF fast failover scan
+			 */
+
+			/*
+			 * It has not found any suitable FCF record, cancel
+			 * the FCF scan in progress, and do nothing
+			 */
+			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					       "2782 No suitable FCF found: "
+					       "(x%x/x%x)\n",
+					       phba->fcoe_eventtag_at_fcf_scan,
+					       bf_get(lpfc_fcf_record_fcf_index,
+						      new_fcf_record));
+				spin_lock_irq(&phba->hbalock);
+				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+					phba->hba_flag &= ~FCF_TS_INPROG;
+					spin_unlock_irq(&phba->hbalock);
+					/* Unregister in-use FCF and rescan */
+					lpfc_printf_log(phba, KERN_INFO,
+							LOG_FIP,
+							"2864 On devloss tmo "
+							"unreg in-use FCF and "
+							"rescan FCF table\n");
+					lpfc_unregister_fcf_rescan(phba);
+					return;
+				}
+				/*
+				 * Let next new FCF event trigger fast failover
+				 */
+				phba->hba_flag &= ~FCF_TS_INPROG;
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			/*
+			 * It has found a suitable FCF record that is not
+			 * the same as in-use FCF record, unregister the
+			 * in-use FCF record, replace the in-use FCF record
+			 * with the new FCF record, mark FCF fast failover
+			 * completed, and then register the new FCF
+			 * record.
+			 */
+
+			/* Unregister the current in-use FCF record */
+			lpfc_unregister_fcf(phba);
+
+			/* Replace in-use record with the new record */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2842 Replace in-use FCF (x%x) "
+					"with failover FCF (x%x)\n",
+					phba->fcf.current_rec.fcf_indx,
+					phba->fcf.failover_rec.fcf_indx);
+			memcpy(&phba->fcf.current_rec,
+			       &phba->fcf.failover_rec,
+			       sizeof(struct lpfc_fcf_rec));
+			/*
+			 * Mark the fast FCF failover rediscovery completed
+			 * and the start of the first round of the roundrobin
+			 * FCF failover.
+			 */
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+			spin_unlock_irq(&phba->hbalock);
+			/* Register to the new FCF record */
+			lpfc_register_fcf(phba);
+		} else {
+			/*
+			 * In case of a transition period to fast FCF failover,
+			 * do nothing when the search reaches the end of the
+			 * FCF table.
+			 */
+			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
+			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
+				return;
+
+			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+				phba->fcf.fcf_flag & FCF_IN_USE) {
+				/*
+				 * In case the current in-use FCF record no
+				 * longer exists during FCF discovery that
+				 * was not triggered by fast FCF failover
+				 * process, treat it as fast FCF failover.
+				 */
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"2841 In-use FCF record (x%x) "
+						"not reported, entering fast "
+						"FCF failover mode scanning.\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						LPFC_FCOE_FCF_GET_FIRST);
+				return;
+			}
+			/* Register to the new FCF record */
+			lpfc_register_fcf(phba);
+		}
+	} else
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
+	return;
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	lpfc_register_fcf(phba);
+
+	return;
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function for FLOGI failure roundrobin FCF failover
+ * read FCF record mailbox command from the eligible FCF record bmask for
+ * performing the failover. If the FCF read back is not valid/available, it
+ * fails through to retrying FLOGI to the currently registered FCF again.
+ * Otherwise, if the FCF read back is valid and available, it will set the
+ * newly read FCF record to the failover FCF record, unregister currently
+ * registered FCF record, copy the failover FCF record to the current
+ * FCF record, and then register the current FCF record before proceeding
+ * to trying FLOGI on the new failover FCF.
+ */
+void
+lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t next_fcf_index, fcf_index;
+	uint16_t current_fcf_index;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If link state is not up, stop the roundrobin failover process */
+	if (phba->link_state < LPFC_LINK_UP) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+		phba->hba_flag &= ~FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+		goto out;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2766 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		goto error_out;
+	}
+
+	/* Get the needed parameters from FCF record */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2848 Remove ineligible FCF (x%x) from "
+				"roundrobin bmask\n", fcf_index);
+		/* Clear roundrobin bmask bit for ineligible FCF */
+		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+		/* Perform next round of roundrobin FCF failover */
+		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+		if (rc)
+			goto out;
+		goto error_out;
+	}
+
+	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2760 Perform FLOGI roundrobin FCF failover: "
+				"FCF (x%x) back to FCF (x%x)\n",
+				phba->fcf.current_rec.fcf_indx, fcf_index);
+		/* Wait 500 ms before retrying FLOGI to current FCF */
+		msleep(500);
+		lpfc_issue_init_vfi(phba->pport);
+		goto out;
+	}
+
+	/* Upload new FCF record to the failover FCF record */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
+			phba->fcf.failover_rec.fcf_indx, fcf_index);
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
+				 new_fcf_record, addr_mode, vlan_id,
+				 (boot_flag ? BOOT_ENABLE : 0));
+	spin_unlock_irq(&phba->hbalock);
+
+	current_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+	/* Unregister the current in-use FCF record */
+	lpfc_unregister_fcf(phba);
+
+	/* Replace in-use record with the new record */
+	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
+	       sizeof(struct lpfc_fcf_rec));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2783 Perform FLOGI roundrobin FCF failover: FCF "
+			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
+
+error_out:
+	lpfc_register_fcf(phba);
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function of read FCF record mailbox command for
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
+ * failover when a new FCF event happened. If the FCF read back is
+ * valid/available and it passes the connection list check, it updates
+ * the bmask for the eligible FCF record for roundrobin failover.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If link state is not up, no need to proceed */
+	if (phba->link_state < LPFC_LINK_UP)
+		goto out;
+
+	/* If FCF discovery period is over, no need to proceed */
+	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
+		goto out;
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2767 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		goto out;
+	}
+
+	/* Check the connection list for eligibility */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	if (!rc)
+		goto out;
+
+	/* Update the eligible FCF record index bmask */
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+
+	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vfi mailbox command.
+ */
+void
+lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	/*
+	 * VFI not supported on interface type 0, just do the flogi
+	 * Also continue if the VFI is in use - just use the same one.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2891 Init VFI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+
+	lpfc_initial_flogi(vport);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vfi mailbox command to initialize the VFI and
+ * VPI for the physical port.
+ */
+void
+lpfc_issue_init_vfi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+	struct lpfc_hba *phba = vport->phba;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2892 Failed to allocate "
+			"init_vfi mailbox\n");
+		return;
+	}
+	lpfc_init_vfi(mboxq, vport);
+	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
+/**
+ * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vpi mailbox command.
+ */
+void
+lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2609 Init VPI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* If this port is the physical port or FDISC is done, do reg_vpi */
+	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
+			ndlp = lpfc_findnode_did(vport, Fabric_DID);
+			if (!ndlp)
+				lpfc_printf_vlog(vport, KERN_ERR,
+					LOG_DISCOVERY,
+					"2731 Cannot find fabric "
+					"controller node\n");
+			else
+				lpfc_register_new_vport(phba, vport, ndlp);
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			return;
+	}
+
+	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+		lpfc_initial_fdisc(vport);
+	else {
+		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2606 No NPIV Fabric support\n");
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vpi mailbox command to initialize
+ * VPI for the vport.
+ */
+void
+lpfc_issue_init_vpi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+
+	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2607 Failed to allocate "
+			"init_vpi mailbox\n");
+		return;
+	}
+	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
+	mboxq->vport = vport;
+	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vports on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+				continue;
+			/* There is no vpi for this vport */
+			if (vports[i]->vpi > phba->max_vpi) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_FAILED);
+				continue;
+			}
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_LINKDOWN);
+				continue;
+			}
+			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+				lpfc_issue_init_vpi(vports[i]);
+				continue;
+			}
+			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+				lpfc_initial_fdisc(vports[i]);
+			else {
+				lpfc_vport_set_state(vports[i],
+						     FC_VPORT_NO_FABRIC_SUPP);
+				lpfc_printf_vlog(vports[i], KERN_ERR,
+						 LOG_ELS,
+						 "0259 No NPIV "
+						 "Fabric support\n");
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_dmabuf *dmabuf = mboxq->context1;
+	struct lpfc_vport *vport = mboxq->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/*
+	 * VFI not supported for interface type 0, so ignore any mailbox
+	 * error (except VFI in use) and continue with the discovery.
+	 */
+	if (mboxq->u.mb.mbxStatus &&
+	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+			LPFC_SLI_INTF_IF_TYPE_0) &&
+	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "2018 REG_VFI mbxStatus error x%x "
+			 "HBA state x%x\n",
+			 mboxq->u.mb.mbxStatus, vport->port_state);
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/* FLOGI failed, use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			goto out_free_mem;
+		}
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		goto out_free_mem;
+	}
+	/* The VPI is implicitly registered when the VFI is registered */
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag |= FC_VFI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* In case SLI4 FC loopback test, we are ready */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (phba->link_flag & LS_LOOPBACK_MODE)) {
+		phba->link_state = LPFC_HBA_READY;
+		goto out_free_mem;
+	}
+
+	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+		/* For private loop just start discovery and we are done. */
+		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
+			/* Use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+		} else {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		}
+	}
+
+out_free_mem:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return;
+}
+
+static void
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+	struct lpfc_vport  *vport = pmb->vport;
+
+
+	/* Check for error */
+	if (mb->mbxStatus) {
+		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0319 READ_SPARAM mbxStatus error x%x "
+				 "hba state x%x>\n",
+				 mb->mbxStatus, vport->port_state);
+		lpfc_linkdown(phba);
+		goto out;
+	}
+
+	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
+	       sizeof (struct serv_parm));
+	lpfc_update_vport_wwn(vport);
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+	}
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+
+out:
+	pmb->context1 = NULL;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	lpfc_issue_clear_la(phba, vport);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+static void
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+	struct Scsi_Host *shost;
+	int i;
+	struct lpfc_dmabuf *mp;
+	int rc;
+	struct fcf_record *fcf_record;
+
+	spin_lock_irq(&phba->hbalock);
+	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+	case LPFC_LINK_SPEED_1GHZ:
+	case LPFC_LINK_SPEED_2GHZ:
+	case LPFC_LINK_SPEED_4GHZ:
+	case LPFC_LINK_SPEED_8GHZ:
+	case LPFC_LINK_SPEED_10GHZ:
+	case LPFC_LINK_SPEED_16GHZ:
+		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
+		break;
+	default:
+		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
+		break;
+	}
+
+	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
+	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+
+	shost = lpfc_shost_from_vport(vport);
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
+		/* if npiv is enabled and this adapter supports npiv log
+		 * a message that npiv is not supported in this topology
+		 */
+		if (phba->cfg_enable_npiv && phba->max_vpi)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1309 Link Up Event npiv not supported in loop "
+				"topology\n");
+		/* Get Loop Map information */
+		if (bf_get(lpfc_mbx_read_top_il, la)) {
+			spin_lock(shost->host_lock);
+			vport->fc_flag |= FC_LBIT;
+			spin_unlock(shost->host_lock);
+		}
+
+		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
+		i = la->lilpBde64.tus.f.bdeSize;
+
+		if (i == 0) {
+			phba->alpa_map[0] = 0;
+		} else {
+			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
+				int numalpa, j, k;
+				union {
+					uint8_t pamap[16];
+					struct {
+						uint32_t wd1;
+						uint32_t wd2;
+						uint32_t wd3;
+						uint32_t wd4;
+					} pa;
+				} un;
+				numalpa = phba->alpa_map[0];
+				j = 0;
+				while (j < numalpa) {
+					memset(un.pamap, 0, 16);
+					for (k = 1; j < numalpa; k++) {
+						un.pamap[k - 1] =
+							phba->alpa_map[j + 1];
+						j++;
+						if (k == 16)
+							break;
+					}
+					/* Link Up Event ALPA map */
+					lpfc_printf_log(phba,
+							KERN_WARNING,
+							LOG_LINK_EVENT,
+							"1304 Link Up Event "
+							"ALPA map Data: x%x "
+							"x%x x%x x%x\n",
+							un.pa.wd1, un.pa.wd2,
+							un.pa.wd3, un.pa.wd4);
+				}
+			}
+		}
+	} else {
+		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+			if (phba->max_vpi && phba->cfg_enable_npiv &&
+			   (phba->sli_rev >= LPFC_SLI_REV3))
+				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+		}
+		vport->fc_myDID = phba->fc_pref_DID;
+		spin_lock(shost->host_lock);
+		vport->fc_flag |= FC_LBIT;
+		spin_unlock(shost->host_lock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_linkup(phba);
+	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!sparam_mbox)
+		goto out;
+
+	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
+	if (rc) {
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+	sparam_mbox->vport = vport;
+	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!cfglink_mbox)
+			goto out;
+		vport->port_state = LPFC_LOCAL_CFG_LINK;
+		lpfc_config_link(phba, cfglink_mbox);
+		cfglink_mbox->vport = vport;
+		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+			goto out;
+		}
+	} else {
+		vport->port_state = LPFC_VPORT_UNKNOWN;
+		/*
+		 * Add the driver's default FCF record at FCF index 0 now. This
+		 * is a phase 1 implementation that supports FCF index 0 and driver
+		 * defaults.
+		 */
+		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
+			fcf_record = kzalloc(sizeof(struct fcf_record),
+					GFP_KERNEL);
+			if (unlikely(!fcf_record)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2554 Could not allocate memory for "
+					"fcf record\n");
+				rc = -ENODEV;
+				goto out;
+			}
+
+			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+						LPFC_FCOE_FCF_DEF_INDEX);
+			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+			if (unlikely(rc)) {
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"2013 Could not manually add FCF "
+					"record 0, status %d\n", rc);
+				rc = -ENODEV;
+				kfree(fcf_record);
+				goto out;
+			}
+			kfree(fcf_record);
+		}
+		/*
+		 * The driver is expected to do FIP/FCF. Call the port
+		 * and get the FCF Table.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG) {
+			spin_unlock_irq(&phba->hbalock);
+			return;
+		}
+		/* This is the initial FCF discovery scan */
+		phba->fcf.fcf_flag |= FCF_INIT_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2778 Start FCF table scan at linkup\n");
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
+		if (rc) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			goto out;
+		}
+		/* Reset FCF roundrobin bmask for new discovery */
+		lpfc_sli4_clear_fcf_rr_bmask(phba);
+	}
+
+	return;
+out:
+	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+			 vport->port_state, sparam_mbox, cfglink_mbox);
+	lpfc_issue_clear_la(phba, vport);
+	return;
+}
+
+static void
+lpfc_enable_la(struct lpfc_hba *phba)
+{
+	uint32_t control;
+	struct lpfc_sli *psli = &phba->sli;
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		control = readl(phba->HCregaddr);
+		control |= HC_LAINT_ENA;
+		writel(control, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+	spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+	lpfc_linkdown(phba);
+	lpfc_enable_la(phba);
+	lpfc_unregister_unused_fcf(phba);
+	/* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
+/*
+ * This routine handles processing a READ_TOPOLOGY mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_mbx_read_top *la;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+	/* Unblock ELS traffic */
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+	/* Check for error */
+	if (mb->mbxStatus) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1307 READ_LA mbox error x%x state x%x\n",
+				mb->mbxStatus, vport->port_state);
+		lpfc_mbx_issue_link_down(phba);
+		phba->link_state = LPFC_HBA_ERROR;
+		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
+	}
+
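+	/* Mailbox succeeded: point at the READ_TOPOLOGY payload and save the
+	 * ALPA map returned in the DMA buffer.
+	 */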
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+
+	memcpy(&phba->alpa_map[0], mp->virt, 128);
+
+	spin_lock_irq(shost->host_lock);
+	if (bf_get(lpfc_mbx_read_top_pb, la))
+		vport->fc_flag |= FC_BYPASSED_MODE;
+	else
+		vport->fc_flag &= ~FC_BYPASSED_MODE;
+	spin_unlock_irq(shost->host_lock);
+
+	if (phba->fc_eventTag <= la->eventTag) {
+		phba->fc_stat.LinkMultiEvent++;
+		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
+			if (phba->fc_eventTag != 0)
+				lpfc_linkdown(phba);
+	}
+
+	phba->fc_eventTag = la->eventTag;
+	spin_lock_irq(&phba->hbalock);
+	if (bf_get(lpfc_mbx_read_top_mm, la))
+		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+	else
+		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+	spin_unlock_irq(&phba->hbalock);
+
+	phba->link_events++;
+	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
+	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
+		phba->fc_stat.LinkUp++;
+		if (phba->link_flag & LS_LOOPBACK_MODE) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+					"1306 Link Up Event in loop back mode "
+					"x%x received Data: x%x x%x x%x x%x\n",
+					la->eventTag, phba->fc_eventTag,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
+					phba->alpa_map[0]);
+		} else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+					"1303 Link Up Event x%x received "
+					"Data: x%x x%x x%x x%x x%x x%x %d\n",
+					la->eventTag, phba->fc_eventTag,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
+					phba->alpa_map[0],
+					bf_get(lpfc_mbx_read_top_mm, la),
+					bf_get(lpfc_mbx_read_top_fa, la),
+					phba->wait_4_mlo_maint_flg);
+		}
+		lpfc_mbx_process_link_up(phba, la);
+	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
+		   LPFC_ATT_LINK_DOWN) {
+		phba->fc_stat.LinkDown++;
+		if (phba->link_flag & LS_LOOPBACK_MODE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1308 Link Down Event in loop back mode "
+				"x%x received "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1305 Link Down Event x%x received "
+				"Data: x%x x%x x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag,
+				bf_get(lpfc_mbx_read_top_mm, la),
+				bf_get(lpfc_mbx_read_top_fa, la));
+		lpfc_mbx_issue_link_down(phba);
+	}
+	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
+	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
+		if (phba->link_state != LPFC_LINK_DOWN) {
+			phba->fc_stat.LinkDown++;
+			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1312 Link Down Event x%x received "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+			lpfc_mbx_issue_link_down(phba);
+		} else
+			lpfc_enable_la(phba);
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+				"1310 Menlo Maint Mode Link up Event x%x rcvd "
+				"Data: x%x x%x x%x\n",
+				la->eventTag, phba->fc_eventTag,
+				phba->pport->port_state, vport->fc_flag);
+		/*
+		 * The cmnd that triggered this will be waiting for this
+		 * signal.
+		 */
+		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
+		if (phba->wait_4_mlo_maint_flg) {
+			phba->wait_4_mlo_maint_flg = 0;
+			wake_up_interruptible(&phba->wait_4_mlo_m_q);
+		}
+	}
+
+	if (bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (bf_get(lpfc_mbx_read_top_mm, la))
+			lpfc_issue_clear_la(phba, vport);
+		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+				"1311 fa %d\n",
+				bf_get(lpfc_mbx_read_top_fa, la));
+	}
+
+lpfc_mbx_cmpl_read_topology_free_mbuf:
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+
+	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+		/* We received an RSCN after issuing this
+		 * mbox reg login; we may have cycled
+		 * back through the state machine and be
+		 * back at the reg login state, so this
+		 * mbox needs to be ignored because
+		 * there is another reg login in
+		 * progress.
+		 */
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+		spin_unlock_irq(shost->host_lock);
+	} else
+		/* Good status, call state machine */
+		lpfc_disc_state_machine(vport, ndlp, pmb,
+				NLP_EVT_CMPL_REG_LOGIN);
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	/* decrement the node reference count held for this callback
+	 * function.
+	 */
+	lpfc_nlp_put(ndlp);
+
+	return;
+}
+
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	switch (mb->mbxStatus) {
+	case 0x0011:
+	case 0x0020:
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
+				 mb->mbxStatus);
+		break;
+	/* If VPI is busy, reset the HBA */
+	case 0x9700:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
+			vport->vpi, mb->mbxStatus);
+		if (!(phba->pport->load_flag & FC_UNLOADING))
+			lpfc_workq_post_event(phba, NULL, NULL,
+				LPFC_EVT_RESET_HBA);
+	}
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	vport->unreg_vpi_cmpl = VPORT_OK;
+	mempool_free(pmb, phba->mbox_mem_pool);
+	lpfc_cleanup_vports_rrqs(vport, NULL);
+	/*
+	 * This shost reference might have been taken at the beginning of
+	 * lpfc_vport_delete()
+	 */
+	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
+		scsi_host_put(shost);
+}
+
+int
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return 1;
+
+	lpfc_unreg_vpi(phba, vport->vpi, mbox);
+	mbox->vport = vport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+				 "1800 Could not issue unreg_vpi\n");
+		mempool_free(mbox, phba->mbox_mem_pool);
+		vport->unreg_vpi_cmpl = VPORT_ERROR;
+		return rc;
+	}
+	return 0;
+}
+
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	MAILBOX_t *mb = &pmb->u.mb;
+
+	switch (mb->mbxStatus) {
+	case 0x0011:
+	case 0x9601:
+	case 0x9602:
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
+				 mb->mbxStatus);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+		vport->fc_myDID = 0;
+		goto out;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	vport->vpi_state |= LPFC_VPI_REGISTERED;
+	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+	vport->num_disc_nodes = 0;
+	/* go thru NPR list and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		lpfc_els_disc_plogi(vport);
+
+	if (!vport->num_disc_nodes) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~FC_NDISC_ACTIVE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_can_disctmo(vport);
+	}
+	vport->port_state = LPFC_VPORT_READY;
+
+out:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function creates vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	struct static_vport_info *vport_info;
+	int rc = 0, i;
+	struct fc_vport_identifiers vport_id;
+	struct fc_vport *new_fc_vport;
+	struct Scsi_Host *shost;
+	struct lpfc_vport *vport;
+	uint16_t offset = 0;
+	uint8_t *vport_buff;
+	struct lpfc_dmabuf *mp;
+	uint32_t byte_count = 0;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0542 lpfc_create_static_vport failed to"
+				" allocate mailbox memory\n");
+		return;
+	}
+
+	mb = &pmb->u.mb;
+
+	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+	if (!vport_info) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0543 lpfc_create_static_vport failed to"
+				" allocate vport_info\n");
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	vport_buff = (uint8_t *) vport_info;
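+	/* Read config region 22 in chunks until the whole static_vport_info
+	 * structure has been copied out of the mailbox/DMA buffer.
+	 */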
+	do {
+		if (lpfc_dump_static_vport(phba, pmb, offset))
+			goto out;
+
+		pmb->vport = phba->pport;
+		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+
+		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0544 lpfc_create_static_vport failed to"
+				" issue dump mailbox command ret 0x%x "
+				"status 0x%x\n",
+				rc, mb->mbxStatus);
+			goto out;
+		}
+
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			byte_count = pmb->u.mqe.un.mb_words[5];
+			mp = (struct lpfc_dmabuf *) pmb->context2;
+			if (byte_count > sizeof(struct static_vport_info) -
+					offset)
+				byte_count = sizeof(struct static_vport_info)
+					- offset;
+			memcpy(vport_buff + offset, mp->virt, byte_count);
+			offset += byte_count;
+		} else {
+			if (mb->un.varDmp.word_cnt >
+				sizeof(struct static_vport_info) - offset)
+				mb->un.varDmp.word_cnt =
+					sizeof(struct static_vport_info)
+						- offset;
+			byte_count = mb->un.varDmp.word_cnt;
+			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				vport_buff + offset,
+				byte_count);
+
+			offset += byte_count;
+		}
+
+	} while (byte_count &&
+		offset < sizeof(struct static_vport_info));
+
+
+	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+			!= VPORT_INFO_REV)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0545 lpfc_create_static_vport bad"
+			" information header 0x%x 0x%x\n",
+			le32_to_cpu(vport_info->signature),
+			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+		goto out;
+	}
+
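+	/* Signature and revision are valid: create an NPIV vport for every
+	 * entry that has a non-zero WWPN and WWNN.
+	 */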
+	shost = lpfc_shost_from_vport(phba->pport);
+
+	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+		memset(&vport_id, 0, sizeof(vport_id));
+		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+		if (!vport_id.port_name || !vport_id.node_name)
+			continue;
+
+		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+		vport_id.vport_type = FC_PORTTYPE_NPIV;
+		vport_id.disable = false;
+		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+		if (!new_fc_vport) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0546 lpfc_create_static_vport failed to"
+				" create vport\n");
+			continue;
+		}
+
+		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+		vport->vport_flag |= STATIC_VPORT;
+	}
+
+out:
+	kfree(vport_info);
+	if (rc != MBX_TIMEOUT) {
+		if (pmb->context2) {
+			mp = (struct lpfc_dmabuf *) pmb->context2;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	return;
+}
+
+/*
+ * This routine handles processing a Fabric REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport *vport = pmb->vport;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
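+	/* On REG_LOGIN failure, fall back to loop-map discovery in loop
+	 * topology; otherwise mark the vport FAILED.
+	 */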
+	if (mb->mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+				 "0258 Register Fabric login error: 0x%x\n",
+				 mb->mbxStatus);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/* FLOGI failed, use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			/* Decrement the reference count to ndlp after all
+			 * references to the ndlp are done.
+			 */
+			lpfc_nlp_put(ndlp);
+			return;
+		}
+
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		/* Decrement the reference count to ndlp after all references
+		 * to the ndlp are done.
+		 */
+		lpfc_nlp_put(ndlp);
+		return;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+		/* When the physical port receives a LOGO, do not start
+		 * vport discovery */
+		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+			lpfc_start_fdiscs(phba);
+		else {
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+			spin_unlock_irq(shost->host_lock);
+		}
+		lpfc_do_scr_ns_plogi(phba, vport);
+	}
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Drop the reference count from the mbox at the end after
+	 * all the current references to the ndlp are done.
+	 */
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/*
+ * This routine handles processing a NameServer REG_LOGIN mailbox
+ * command upon completion. It is setup in the LPFC_MBOXQ
+ * as the completion routine when the command is
+ * handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_vport *vport = pmb->vport;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+out:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0260 Register NameServer error: 0x%x\n",
+				 mb->mbxStatus);
+		/* decrement the node reference count held for this
+		 * callback function.
+		 */
+		lpfc_nlp_put(ndlp);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+		/* If no other thread is using the ndlp, free it */
+		lpfc_nlp_not_used(ndlp);
+
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+			/*
+			 * RegLogin failed, use loop map to make discovery
+			 * list
+			 */
+			lpfc_disc_list_loopmap(vport);
+
+			/* Start discovery */
+			lpfc_disc_start(vport);
+			return;
+		}
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+	if (vport->port_state < LPFC_VPORT_READY) {
+		/* Link up discovery requires Fabric registration. */
+		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
+		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+
+		/* Issue SCR just before NameServer GID_FT Query */
+		lpfc_issue_els_scr(vport, SCR_DID, 0);
+	}
+
+	vport->fc_ns_retry = 0;
+	/* Good status, issue CT Request to NameServer */
+	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
+		/* Cannot issue NameServer Query, so finish up discovery */
+		goto out;
+	}
+
+	/* decrement the node reference count held for this
+	 * callback function.
+	 */
+	lpfc_nlp_put(ndlp);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+}
+
+static void
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct fc_rport  *rport;
+	struct lpfc_rport_data *rdata;
+	struct fc_rport_identifiers rport_ids;
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Remote port has reappeared. Re-register w/ FC transport */
+	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	rport_ids.port_id = ndlp->nlp_DID;
+	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+	/*
+	 * We leave our node pointer in rport->dd_data when we unregister a
+	 * FCP target port.  But fc_remote_port_add zeros the space to which
+	 * rport->dd_data points.  So, if we're reusing a previously
+	 * registered port, drop the reference that we took the last time we
+	 * registered the port.
+	 */
+	if (ndlp->rport && ndlp->rport->dd_data &&
+	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
+		lpfc_nlp_put(ndlp);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+		"rport add:       did:x%x flg:x%x type x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	/* Don't add the remote port if unloading. */
+	if (vport->load_flag & FC_UNLOADING)
+		return;
+
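+	/* Register the remote port with the FC transport and take a
+	 * reference on the rport device.
+	 */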
+	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
+	if (!rport || !get_device(&rport->dev)) {
+		dev_printk(KERN_WARNING, &phba->pcidev->dev,
+			   "Warning: fc_remote_port_add failed\n");
+		return;
+	}
+
+	/* initialize static port data */
+	rport->maxframe_size = ndlp->nlp_maxframe;
+	rport->supported_classes = ndlp->nlp_class_sup;
+	rdata = rport->dd_data;
+	rdata->pnode = lpfc_nlp_get(ndlp);
+
+	if (ndlp->nlp_type & NLP_FCP_TARGET)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+	if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
+		fc_remote_port_rolechg(rport, rport_ids.roles);
+
+	if ((rport->scsi_target_id != -1) &&
+	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+		ndlp->nlp_sid = rport->scsi_target_id;
+	}
+	return;
+}
+
+static void
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
+{
+	struct fc_rport *rport = ndlp->rport;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+		"rport delete:    did:x%x flg:x%x type x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	fc_remote_port_delete(rport);
+
+	return;
+}
+
+static void
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	switch (state) {
+	case NLP_STE_UNUSED_NODE:
+		vport->fc_unused_cnt += count;
+		break;
+	case NLP_STE_PLOGI_ISSUE:
+		vport->fc_plogi_cnt += count;
+		break;
+	case NLP_STE_ADISC_ISSUE:
+		vport->fc_adisc_cnt += count;
+		break;
+	case NLP_STE_REG_LOGIN_ISSUE:
+		vport->fc_reglogin_cnt += count;
+		break;
+	case NLP_STE_PRLI_ISSUE:
+		vport->fc_prli_cnt += count;
+		break;
+	case NLP_STE_UNMAPPED_NODE:
+		vport->fc_unmap_cnt += count;
+		break;
+	case NLP_STE_MAPPED_NODE:
+		vport->fc_map_cnt += count;
+		break;
+	case NLP_STE_NPR_NODE:
+		vport->fc_npr_cnt += count;
+		break;
+	}
+	spin_unlock_irq(shost->host_lock);
+}
+
+static void
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       int old_state, int new_state)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (new_state == NLP_STE_UNMAPPED_NODE) {
+		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+		ndlp->nlp_type |= NLP_FC_NODE;
+	}
+	if (new_state == NLP_STE_MAPPED_NODE)
+		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
+	if (new_state == NLP_STE_NPR_NODE)
+		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
+
+	/* Transport interface */
+	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
+			    old_state == NLP_STE_UNMAPPED_NODE)) {
+		vport->phba->nport_event_cnt++;
+		lpfc_unregister_remote_port(ndlp);
+	}
+
+	if (new_state ==  NLP_STE_MAPPED_NODE ||
+	    new_state == NLP_STE_UNMAPPED_NODE) {
+		vport->phba->nport_event_cnt++;
+		/*
+		 * Tell the fc transport about the port, if we haven't
+		 * already. If we have, and it's a scsi entity, be
+		 * sure to unblock any attached scsi devices
+		 */
+		lpfc_register_remote_port(vport, ndlp);
+	}
+	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
+		(vport->stat_data_enabled)) {
+		/*
+		 * A new target is discovered; if there is no buffer for
+		 * statistical data collection, allocate one.
+		 */
+		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+					 sizeof(struct lpfc_scsicmd_bkt),
+					 GFP_KERNEL);
+
+		if (!ndlp->lat_data)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"0286 lpfc_nlp_state_cleanup failed to "
+				"allocate statistical data buffer DID "
+				"0x%x\n", ndlp->nlp_DID);
+	}
+	/*
+	 * if we added the node to the Mapped list, but the remote port
+	 * registration failed or assigned a target id outside
+	 * our presentable range, move the node to the
+	 * Unmapped List
+	 */
+	if (new_state == NLP_STE_MAPPED_NODE &&
+	    (!ndlp->rport ||
+	     ndlp->rport->scsi_target_id == -1 ||
+	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+}
+
+static char *
+lpfc_nlp_state_name(char *buffer, size_t size, int state)
+{
+	static char *states[] = {
+		[NLP_STE_UNUSED_NODE] = "UNUSED",
+		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
+		[NLP_STE_ADISC_ISSUE] = "ADISC",
+		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
+		[NLP_STE_PRLI_ISSUE] = "PRLI",
+		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
+		[NLP_STE_MAPPED_NODE] = "MAPPED",
+		[NLP_STE_NPR_NODE] = "NPR",
+	};
+
+	if (state < NLP_STE_MAX_STATE && states[state])
+		strlcpy(buffer, states[state], size);
+	else
+		snprintf(buffer, size, "unknown (%d)", state);
+	return buffer;
+}
+
+void
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		   int state)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	int  old_state = ndlp->nlp_state;
+	char name1[16], name2[16];
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0904 NPort state transition x%06x, %s -> %s\n",
+			 ndlp->nlp_DID,
+			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
+			 lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node statechg    did:x%x old:%d ste:%d",
+		ndlp->nlp_DID, old_state, state);
+
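+	/* Leaving NPR cancels any pending delayed-retry timer; leaving
+	 * UNMAPPED clears the FC node type and no-SCSI-id flags.
+	 */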
+	if (old_state == NLP_STE_NPR_NODE &&
+	    state != NLP_STE_NPR_NODE)
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (old_state == NLP_STE_UNMAPPED_NODE) {
+		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
+		ndlp->nlp_type &= ~NLP_FC_NODE;
+	}
+
+	if (list_empty(&ndlp->nlp_listp)) {
+		spin_lock_irq(shost->host_lock);
+		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+		spin_unlock_irq(shost->host_lock);
+	} else if (old_state)
+		lpfc_nlp_counters(vport, old_state, -1);
+
+	ndlp->nlp_state = state;
+	lpfc_nlp_counters(vport, state, 1);
+	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
+}
+
+void
+lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (list_empty(&ndlp->nlp_listp)) {
+		spin_lock_irq(shost->host_lock);
+		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+		spin_unlock_irq(shost->host_lock);
+	}
+}
+
+void
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+	spin_lock_irq(shost->host_lock);
+	list_del_init(&ndlp->nlp_listp);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+				NLP_STE_UNUSED_NODE);
+}
+
+static void
+lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
+		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+				NLP_STE_UNUSED_NODE);
+}
+/**
+ * lpfc_initialize_node - Initialize all fields of node object
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ *
+ * This function is always called when a node object needs to be initialized.
+ * It initializes all the fields of the node object. Although the reference
+ * to phba from @ndlp can be obtained indirectly through its reference to
+ * @vport, a direct reference to phba is taken here by @ndlp because the
+ * life-span of the @ndlp might go beyond the existence of @vport: the
+ * final release of ndlp is determined by its reference count, and the
+ * operations on @ndlp need the reference to phba.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	uint32_t did)
+{
+	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+	init_timer(&ndlp->nlp_delayfunc);
+	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+	ndlp->nlp_DID = did;
+	ndlp->vport = vport;
+	ndlp->phba = vport->phba;
+	ndlp->nlp_sid = NLP_NO_SID;
+	kref_init(&ndlp->kref);
+	NLP_INT_NODE_ACT(ndlp);
+	atomic_set(&ndlp->cmd_pending, 0);
+	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+}
+
+struct lpfc_nodelist *
+lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 int state)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t did;
+	unsigned long flags;
+
+	if (!ndlp)
+		return NULL;
+
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* The ndlp should not be in memory free mode */
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0277 lpfc_enable_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		return NULL;
+	}
+	/* The ndlp should not already be in active mode */
+	if (NLP_CHK_NODE_ACT(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0278 lpfc_enable_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		return NULL;
+	}
+
+	/* Keep the original DID */
+	did = ndlp->nlp_DID;
+
+	/* re-initialize ndlp except of ndlp linked list pointer */
+	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+	lpfc_initialize_node(vport, ndlp, did);
+
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+	if (state != NLP_STE_UNUSED_NODE)
+		lpfc_nlp_set_state(vport, ndlp, state);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node enable:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+	return ndlp;
+}
+
+void
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	/*
+	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
+	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
+	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
+	 * list until ALL other outstanding threads have completed. We check
+	 * that the ndlp is not already in the UNUSED state before we proceed.
+	 */
+	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+		return;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_cleanup_vports_rrqs(vport, ndlp);
+	lpfc_nlp_put(ndlp);
+	return;
+}
+
+/*
+ * Start / ReStart rescue timer for Discovery / RSCN handling
+ */
+void
+lpfc_set_disctmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t tmo;
+
+	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
+		/* For FAN, timeout should be greater than edtov */
+		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
+	} else {
+		/* Normal discovery timeout should be greater than the ELS/CT
+		 * timeout; the FC spec states we need 3 * ratov for CT requests
+		 */
+		tmo = ((phba->fc_ratov * 3) + 3);
+	}
+
+
+	if (!timer_pending(&vport->fc_disctmo)) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+			"set disc timer:  tmo:x%x state:x%x flg:x%x",
+			tmo, vport->port_state, vport->fc_flag);
+	}
+
+	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_DISC_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Start Discovery Timer state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0247 Start Discovery Timer state x%x "
+			 "Data: x%x x%lx x%x x%x\n",
+			 vport->port_state, tmo,
+			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+			 vport->fc_adisc_cnt);
+
+	return;
+}
+
+/*
+ * Cancel rescue timer for Discovery / RSCN handling
+ */
+int
+lpfc_can_disctmo(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	unsigned long iflags;
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"can disc timer:  state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	/* Turn off discovery timer if its running */
+	if (vport->fc_flag & FC_DISC_TMO) {
+		spin_lock_irqsave(shost->host_lock, iflags);
+		vport->fc_flag &= ~FC_DISC_TMO;
+		spin_unlock_irqrestore(shost->host_lock, iflags);
+		del_timer_sync(&vport->fc_disctmo);
+		spin_lock_irqsave(&vport->work_port_lock, iflags);
+		vport->work_port_events &= ~WORKER_DISC_TMO;
+		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
+	}
+
+	/* Cancel Discovery Timer state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0248 Cancel Discovery Timer state x%x "
+			 "Data: x%x x%x x%x\n",
+			 vport->port_state, vport->fc_flag,
+			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
+	return 0;
+}
+
+/*
+ * Check the specified ring for outstanding IOCBs on the SLI queue.
+ * Return true if the iocb matches the specified nport.
+ */
+int
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+		    struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *iocb,
+		    struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	IOCB_t *icmd = &iocb->iocb;
+	struct lpfc_vport    *vport = ndlp->vport;
+
+	if (iocb->vport != vport)
+		return 0;
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		switch (icmd->ulpCommand) {
+		case CMD_GEN_REQUEST64_CR:
+			if (iocb->context_un.ndlp == ndlp)
+				return 1;
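+			/* Fall through */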
+		case CMD_ELS_REQUEST64_CR:
+			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+				return 1;
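+			/* Fall through */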
+		case CMD_XMIT_ELS_RSP64_CX:
+			if (iocb->context1 == (uint8_t *) ndlp)
+				return 1;
+		}
+	} else if (pring->ringno == psli->extra_ring) {
+
+	} else if (pring->ringno == psli->fcp_ring) {
+		/* Skip match check if waiting to relogin to FCP target */
+		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			return 0;
+		}
+		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+			return 1;
+		}
+	} else if (pring->ringno == psli->next_ring) {
+
+	}
+	return 0;
+}
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with nlp_rpi in the LPFC_NODELIST entry.
+ */
+static int
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+	uint32_t i;
+
+	lpfc_fabric_abort_nport(ndlp);
+
+	/*
+	 * Everything that matches on txcmplq will be returned
+	 * by firmware with a no rpi error.
+	 */
+	psli = &phba->sli;
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+		/* Now process each ring */
+		for (i = 0; i < psli->num_rings; i++) {
+			pring = &psli->ring[i];
+
+			spin_lock_irq(&phba->hbalock);
+			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
+						 list) {
+				/*
+				 * Check to see if iocb matches the nport we are
+				 * looking for
+				 */
+				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
+							 ndlp))) {
+					/* It matches, so dequeue and call the
+					   completion with an error */
+					list_move_tail(&iocb->list,
+						       &completions);
+					pring->txq_cnt--;
+				}
+			}
+			spin_unlock_irq(&phba->hbalock);
+		}
+	}
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+
+	return 0;
+}
+
+/*
+ * Free rpi associated with LPFC_NODELIST entry.
+ * This routine is called from lpfc_freenode(), when we are removing
+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
+ * LOGO that completes successfully, and we are waiting to PLOGI back
+ * to the remote NPort. In addition, it is called after we receive
+ * an unsolicited ELS cmd, send back a rsp; the rsp completes and
+ * we are waiting to PLOGI back to the remote NPort.
+ */
+int
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t    *mbox;
+	int rc;
+	uint16_t rpi;
+
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox) {
+			/* SLI4 ports require the physical rpi value. */
+			rpi = ndlp->nlp_rpi;
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
+			mbox->vport = vport;
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+			if (rc == MBX_NOT_FINISHED)
+				mempool_free(mbox, phba->mbox_mem_pool);
+		}
+		lpfc_no_rpi(phba, ndlp);
+
+		if (phba->sli_rev != LPFC_SLI_REV4)
+			ndlp->nlp_rpi = 0;
+		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unregister all the currently registered RPIs
+ * to the HBA.
+ **/
+void
+lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (!vports) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2884 Vport array allocation failed\n");
+		return;
+	}
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+				/* The mempool_alloc might sleep */
+				spin_unlock_irq(shost->host_lock);
+				lpfc_unreg_rpi(vports[i], ndlp);
+				spin_lock_irq(shost->host_lock);
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_sli4_unreg_all_rpis(vport);
+		return;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
+				 mbox);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+				"1836 Could not issue "
+				"unreg_login(all_rpis) status %d\n", rc);
+	}
+}
+
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
+			       mbox);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, phba->mbox_mem_pool);
+
+		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					 "1815 Could not issue "
+					 "unreg_did (default rpis) status %d\n",
+					 rc);
+	}
+}
+
+/*
+ * Free resources associated with LPFC_NODELIST entry
+ * so it can be freed.
+ */
+static int
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	LPFC_MBOXQ_t *mb, *nextmb;
+	struct lpfc_dmabuf *mp;
+
+	/* Cleanup node for NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0900 Cleanup node for NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag,
+			 ndlp->nlp_state, ndlp->nlp_rpi);
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0280 lpfc_cleanup_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		lpfc_dequeue_node(vport, ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+				"0281 lpfc_cleanup_node: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		lpfc_disable_node(vport, ndlp);
+	}
+
+	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+	if ((mb = phba->sli.mbox_active)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mb->context2 = NULL;
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	/* Cleanup REG_LOGIN completions which are not yet processed */
+	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+			(ndlp != (struct lpfc_nodelist *) mb->context2))
+			continue;
+
+		mb->context2 = NULL;
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	}
+
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			list_del(&mb->list);
+			mempool_free(mb, phba->mbox_mem_pool);
+			/* We shall not invoke the lpfc_nlp_put to decrement
+			 * the ndlp reference count as we are in the process
+			 * of lpfc_nlp_release.
+			 */
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_els_abort(phba, ndlp);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	ndlp->nlp_last_elscmd = 0;
+	del_timer_sync(&ndlp->nlp_delayfunc);
+
+	list_del_init(&ndlp->els_retry_evt.evt_listp);
+	list_del_init(&ndlp->dev_loss_evt.evt_listp);
+	lpfc_cleanup_vports_rrqs(vport, ndlp);
+	lpfc_unreg_rpi(vport, ndlp);
+
+	return 0;
+}
+
+/*
+ * Check to see if we can free the nlp back to the freelist.
+ * If we are in the middle of using the nlp in the discovery state
+ * machine, defer the free till we reach the end of the state machine.
+ */
+static void
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		/* For this case we need to cleanup the default rpi
+		 * allocated by the firmware.
+		 */
+		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
+			!= NULL) {
+			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
+			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
+			if (rc) {
+				mempool_free(mbox, phba->mbox_mem_pool);
+			} else {
+				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+				mbox->vport = vport;
+				mbox->context2 = NULL;
+				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+				if (rc == MBX_NOT_FINISHED) {
+					mempool_free(mbox, phba->mbox_mem_pool);
+				}
+			}
+		}
+	}
+	lpfc_cleanup_node(vport, ndlp);
+
+	/*
+	 * We can get here with a non-NULL ndlp->rport because when we
+	 * unregister a rport we don't break the rport/node linkage.  So if we
+	 * do, make sure we don't leave any dangling pointers behind.
+	 */
+	if (ndlp->rport) {
+		rdata = ndlp->rport->dd_data;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+	}
+}
+
+static int
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      uint32_t did)
+{
+	D_ID mydid, ndlpdid, matchdid;
+
+	if (did == Bcast_DID)
+		return 0;
+
+	/* First check for Direct match */
+	if (ndlp->nlp_DID == did)
+		return 1;
+
+	/* Next check for area/domain identically equals 0 match */
+	mydid.un.word = vport->fc_myDID;
+	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
+		return 0;
+	}
+
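+	/* Otherwise, accept a match on the AL_PA byte alone when one DID
+	 * carries a zero area/domain and the other lies in our own domain
+	 * and area.
+	 */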
+	matchdid.un.word = did;
+	ndlpdid.un.word = ndlp->nlp_DID;
+	if (matchdid.un.b.id == ndlpdid.un.b.id) {
+		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
+		    (mydid.un.b.area == matchdid.un.b.area)) {
+			if ((ndlpdid.un.b.domain == 0) &&
+			    (ndlpdid.un.b.area == 0)) {
+				if (ndlpdid.un.b.id)
+					return 1;
+			}
+			return 0;
+		}
+
+		matchdid.un.word = ndlp->nlp_DID;
+		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
+		    (mydid.un.b.area == ndlpdid.un.b.area)) {
+			if ((matchdid.un.b.domain == 0) &&
+			    (matchdid.un.b.area == 0)) {
+				if (matchdid.un.b.id)
+					return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+/* Search for a nodelist entry */
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+	struct lpfc_nodelist *ndlp;
+	uint32_t data1;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (lpfc_matchdid(vport, ndlp, did)) {
+			data1 = (((uint32_t) ndlp->nlp_state << 24) |
+				 ((uint32_t) ndlp->nlp_xri << 16) |
+				 ((uint32_t) ndlp->nlp_type << 8) |
+				 ((uint32_t) ndlp->nlp_rpi & 0xff));
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+					 "0929 FIND node DID "
+					 "Data: x%p x%x x%x x%x\n",
+					 ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag, data1);
+			return ndlp;
+		}
+	}
+
+	/* FIND node did <did> NOT FOUND */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "0932 FIND node did x%x NOT FOUND.\n", did);
+	return NULL;
+}
+
+struct lpfc_nodelist *
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+	unsigned long iflags;
+
+	spin_lock_irqsave(shost->host_lock, iflags);
+	ndlp = __lpfc_findnode_did(vport, did);
+	spin_unlock_irqrestore(shost->host_lock, iflags);
+	return ndlp;
+}
+
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = lpfc_findnode_did(vport, did);
+	if (!ndlp) {
+		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+		    lpfc_rscn_payload_check(vport, did) == 0)
+			return NULL;
+		ndlp = (struct lpfc_nodelist *)
+		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return NULL;
+		lpfc_nlp_init(vport, ndlp, did);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp;
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
+		if (!ndlp)
+			return NULL;
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp;
+	}
+
+	if ((vport->fc_flag & FC_RSCN_MODE) &&
+	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
+		if (lpfc_rscn_payload_check(vport, did)) {
+			/* If we've already received a PLOGI from this NPort
+			 * we don't need to try to discover it again.
+			 */
+			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+				return NULL;
+
+			/* Since this node is marked for discovery,
+			 * delay timeout is not needed.
+			 */
+			lpfc_cancel_retry_delay_tmo(vport, ndlp);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+		} else
+			ndlp = NULL;
+	} else {
+		/* If we've already received a PLOGI from this NPort,
+		 * or we are already in the process of discovery on it,
+		 * we don't need to try to discover it again.
+		 */
+		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
+		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+		    ndlp->nlp_flag & NLP_RCV_PLOGI)
+			return NULL;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return ndlp;
+}
+
+/* Build a list of nodes to discover based on the loopmap */
+void
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	int j;
+	uint32_t alpa, index;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+		return;
+
+	/* Check for loop map present or not */
+	if (phba->alpa_map[0]) {
+		for (j = 1; j <= phba->alpa_map[0]; j++) {
+			alpa = phba->alpa_map[j];
+			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
+				continue;
+			lpfc_setup_disc_node(vport, alpa);
+		}
+	} else {
+		/* No alpamap, so try all alpa's */
+		for (j = 0; j < FC_MAXLOOP; j++) {
+			/* If cfg_scan_down is set, start from highest
+			 * ALPA (0xef) to lowest (0x1).
+			 */
+			if (vport->cfg_scan_down)
+				index = j;
+			else
+				index = FC_MAXLOOP - j - 1;
+			alpa = lpfcAlpaArray[index];
+			if ((vport->fc_myDID & 0xff) == alpa)
+				continue;
+			lpfc_setup_disc_node(vport, alpa);
+		}
+	}
+	return;
+}
+
+void
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
+	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
+	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
+	int  rc;
+
+	/*
+	 * if it's not a physical port or if we already sent
+	 * clear_la then don't send it.
+	 */
+	if ((phba->link_state >= LPFC_CLEAR_LA) ||
+	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
+		(phba->sli_rev == LPFC_SLI_REV4))
+		return;
+
+	/* Link up discovery */
+	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+		phba->link_state = LPFC_CLEAR_LA;
+		lpfc_clear_la(phba, mbox);
+		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+		mbox->vport = vport;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			lpfc_disc_flush_list(vport);
+			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+			phba->link_state = LPFC_HBA_ERROR;
+		}
+	}
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *regvpimbox;
+
+	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (regvpimbox) {
+		lpfc_reg_vpi(vport, regvpimbox);
+		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+		regvpimbox->vport = vport;
+		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
+					== MBX_NOT_FINISHED) {
+			mempool_free(regvpimbox, phba->mbox_mem_pool);
+		}
+	}
+}
+
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	uint32_t num_sent;
+	uint32_t clear_la_pending;
+	int did_changed;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	if (phba->link_state == LPFC_CLEAR_LA)
+		clear_la_pending = 1;
+	else
+		clear_la_pending = 0;
+
+	if (vport->port_state < LPFC_VPORT_READY)
+		vport->port_state = LPFC_DISC_AUTH;
+
+	lpfc_set_disctmo(vport);
+
+	if (vport->fc_prevDID == vport->fc_myDID)
+		did_changed = 0;
+	else
+		did_changed = 1;
+
+	vport->fc_prevDID = vport->fc_myDID;
+	vport->num_disc_nodes = 0;
+
+	/* Start Discovery state <hba_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0202 Start Discovery hba state x%x "
+			 "Data: x%x x%x x%x\n",
+			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
+			 vport->fc_adisc_cnt);
+
+	/* First do ADISCs - if any */
+	num_sent = lpfc_els_disc_adisc(vport);
+
+	if (num_sent)
+		return;
+
+	/* Register the VPI for SLI3, NON-NPIV only. */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(vport->fc_flag & FC_PT2PT) &&
+	    !(vport->fc_flag & FC_RSCN_MODE) &&
+	    (phba->sli_rev < LPFC_SLI_REV4)) {
+		lpfc_issue_reg_vpi(phba, vport);
+		return;
+	}
+
+	/*
+	 * For SLI2, we need to set port_state to READY and continue
+	 * discovery.
+	 */
+	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
+		/* If we get here, there is nothing to ADISC */
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_issue_clear_la(phba, vport);
+
+		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+			vport->num_disc_nodes = 0;
+			/* go thru NPR nodes and issue ELS PLOGIs */
+			if (vport->fc_npr_cnt)
+				lpfc_els_disc_plogi(vport);
+
+			if (!vport->num_disc_nodes) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+			}
+		}
+		vport->port_state = LPFC_VPORT_READY;
+	} else {
+		/* Next do PLOGIs - if any */
+		num_sent = lpfc_els_disc_plogi(vport);
+
+		if (num_sent)
+			return;
+
+		if (vport->fc_flag & FC_RSCN_MODE) {
+			/* Check to see if more RSCNs came in while we
+			 * were processing this one.
+			 */
+			if ((vport->fc_rscn_id_cnt == 0) &&
+			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_RSCN_MODE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+			} else
+				lpfc_els_handle_rscn(vport);
+		}
+	}
+	return;
+}
+
+/*
+ * Ignore completion for all IOCBs on the tx and txcmpl queues for the ELS
+ * ring that match the specified nodelist.
+ */
+static void
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli;
+	IOCB_t     *icmd;
+	struct lpfc_iocbq    *iocb, *next_iocb;
+	struct lpfc_sli_ring *pring;
+
+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+
+	/* Error matching iocb on txq or txcmplq
+	 * First check the txq.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		if (iocb->context1 != ndlp) {
+			continue;
+		}
+		icmd = &iocb->iocb;
+		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
+		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+			list_move_tail(&iocb->list, &completions);
+			pring->txq_cnt--;
+		}
+	}
+
+	/* Next check the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
+		if (iocb->context1 != ndlp) {
+			continue;
+		}
+		icmd = &iocb->iocb;
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+static void
+lpfc_disc_flush_list(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct lpfc_hba *phba = vport->phba;
+
+	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
+				lpfc_free_tx(phba, ndlp);
+			}
+		}
+	}
+}
+
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+	lpfc_els_flush_rscn(vport);
+	lpfc_els_flush_cmd(vport);
+	lpfc_disc_flush_list(vport);
+}
+
+/*****************************************************************************/
+/*
+ * NAME:     lpfc_disc_timeout
+ *
+ * FUNCTION: Fibre Channel driver discovery timeout routine.
+ *
+ * EXECUTION ENVIRONMENT: interrupt only
+ *
+ * CALLED FROM:
+ *      Timer function
+ *
+ * RETURNS:
+ *      none
+ */
+/*****************************************************************************/
+void
+lpfc_disc_timeout(unsigned long ptr)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+	struct lpfc_hba   *phba = vport->phba;
+	uint32_t tmo_posted;
+	unsigned long flags = 0;
+
+	if (unlikely(!phba))
+		return;
+
+	spin_lock_irqsave(&vport->work_port_lock, flags);
+	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+	if (!tmo_posted)
+		vport->work_port_events |= WORKER_DISC_TMO;
+	spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
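+	/* Only wake the worker thread if this call posted the event;
+	 * otherwise a wakeup is already pending for it.
+	 */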
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+static void
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_sli  *psli = &phba->sli;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	LPFC_MBOXQ_t *initlinkmbox;
+	int rc, clrlaerr = 0;
+
+	if (!(vport->fc_flag & FC_DISC_TMO))
+		return;
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag &= ~FC_DISC_TMO;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"disc timeout:    state:x%x rtry:x%x flg:x%x",
+		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
+	switch (vport->port_state) {
+
+	case LPFC_LOCAL_CFG_LINK:
+	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
+	 * FAN
+	 */
+		/* FAN timeout */
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
+				 "0221 FAN timeout\n");
+		/* Start discovery by sending FLOGI, clean up old rpis */
+		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+					 nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
+				continue;
+			if (ndlp->nlp_type & NLP_FABRIC) {
+				/* Clean up the ndlp on Fabric connections */
+				lpfc_drop_node(vport, ndlp);
+
+			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+				/* Fail outstanding IO now since device
+				 * is marked for PLOGI.
+				 */
+				lpfc_unreg_rpi(vport, ndlp);
+			}
+		}
+		if (vport->port_state != LPFC_FLOGI) {
+			if (phba->sli_rev <= LPFC_SLI_REV3)
+				lpfc_initial_flogi(vport);
+			else
+				lpfc_issue_init_vfi(vport);
+			return;
+		}
+		break;
+
+	case LPFC_FDISC:
+	case LPFC_FLOGI:
+	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+		/* Initial FLOGI timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0222 Initial %s timeout\n",
+				 vport->vpi ? "FDISC" : "FLOGI");
+
+		/* Assume no Fabric and go on with discovery.
+		 * Check for outstanding ELS FLOGI to abort.
+		 */
+
+		/* FLOGI failed, so just use loop map to make discovery list */
+		lpfc_disc_list_loopmap(vport);
+
+		/* Start discovery */
+		lpfc_disc_start(vport);
+		break;
+
+	case LPFC_FABRIC_CFG_LINK:
+	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
+	   NameServer login */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0223 Timeout while waiting for "
+				 "NameServer login\n");
+		/* Next look for NameServer ndlp */
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+			lpfc_els_abort(phba, ndlp);
+
+		/* ReStart discovery */
+		goto restart_disc;
+
+	case LPFC_NS_QRY:
+	/* Check for wait for NameServer Rsp timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0224 NameServer Query timeout "
+				 "Data: x%x x%x\n",
+				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+			/* Try it one more time */
+			vport->fc_ns_retry++;
+			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+					 vport->fc_ns_retry, 0);
+			if (rc == 0)
+				break;
+		}
+		vport->fc_ns_retry = 0;
+
+restart_disc:
+		/*
+		 * Discovery is over.
+		 * set port_state to PORT_READY if SLI2.
+		 * cmpl_reg_vpi will set port_state to READY for SLI3.
+		 */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
+		}
+
+		/* Setup and issue mailbox INITIALIZE LINK command */
+		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!initlinkmbox) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					 "0206 Device Discovery "
+					 "completion error\n");
+			phba->link_state = LPFC_HBA_ERROR;
+			break;
+		}
+
+		lpfc_linkdown(phba);
+		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
+			       phba->cfg_link_speed);
+		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+		initlinkmbox->vport = vport;
+		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
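+	/* lpfc_sli_def_mbox_cmpl releases the mailbox when the command
+	 * completes.
+	 */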
+		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
+		lpfc_set_loopback_flag(phba);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(initlinkmbox, phba->mbox_mem_pool);
+
+		break;
+
+	case LPFC_DISC_AUTH:
+	/* Node Authentication timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0227 Node Authentication timeout\n");
+		lpfc_disc_flush_list(vport);
+
+		/*
+		 * set port_state to PORT_READY if SLI2.
+		 * cmpl_reg_vpi will set port_state to READY for SLI3.
+		 */
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				lpfc_issue_reg_vpi(phba, vport);
+			else  {	/* NPIV Not enabled */
+				lpfc_issue_clear_la(phba, vport);
+				vport->port_state = LPFC_VPORT_READY;
+			}
+		}
+		break;
+
+	case LPFC_VPORT_READY:
+		if (vport->fc_flag & FC_RSCN_MODE) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+					 "0231 RSCN timeout Data: x%x "
+					 "x%x\n",
+					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+			/* Cleanup any outstanding ELS commands */
+			lpfc_els_flush_cmd(vport);
+
+			lpfc_els_flush_rscn(vport);
+			lpfc_disc_flush_list(vport);
+		}
+		break;
+
+	default:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0273 Unexpected discovery timeout, "
+				 "vport State x%x\n", vport->port_state);
+		break;
+	}
+
+	switch (phba->link_state) {
+	case LPFC_CLEAR_LA:
+		/* CLEAR LA timeout */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0228 CLEAR LA timeout\n");
+		clrlaerr = 1;
+		break;
+
+	case LPFC_LINK_UP:
+		lpfc_issue_clear_la(phba, vport);
+		/* Drop thru */
+	case LPFC_LINK_UNKNOWN:
+	case LPFC_WARM_START:
+	case LPFC_INIT_START:
+	case LPFC_INIT_MBX_CMDS:
+	case LPFC_LINK_DOWN:
+	case LPFC_HBA_ERROR:
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				 "0230 Unexpected timeout, hba link "
+				 "state x%x\n", phba->link_state);
+		clrlaerr = 1;
+		break;
+
+	case LPFC_HBA_READY:
+		break;
+	}
+
+	if (clrlaerr) {
+		lpfc_disc_flush_list(vport);
+		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+		vport->port_state = LPFC_VPORT_READY;
+	}
+
+	return;
+}
+
+/*
+ * This routine handles processing a REG_LOGIN mailbox command for the
+ * FDMI node upon completion. It is set up in the LPFC_MBOXQ as the
+ * completion routine when the command is handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+	struct lpfc_vport    *vport = pmb->vport;
+
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	ndlp->nlp_type |= NLP_FABRIC;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+	/*
+	 * Start issuing the Fabric-Device Management Interface (FDMI) command
+	 * to 0xfffffa (the FDMI well-known port), or delay issuing the FDMI
+	 * command if fdmi-on=2 (supporting RPA/hostname).
+	 */
+
+	if (vport->cfg_fdmi_on == 1)
+		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+	else
+		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+
+	/* decrement the node reference count held for this callback
+	 * function.
+	 */
+	lpfc_nlp_put(ndlp);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return;
+}
+
+static int
+lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
+{
+	uint16_t *rpi = param;
+
+	/* check for active node */
+	if (!NLP_CHK_NODE_ACT(ndlp))
+		return 0;
+
+	return ndlp->nlp_rpi == *rpi;
+}
+
+static int
+lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
+{
+	return memcmp(&ndlp->nlp_portname, param,
+		      sizeof(ndlp->nlp_portname)) == 0;
+}
+
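+/*
+ * Walk the vport's node list and return the first ndlp accepted by the
+ * supplied filter callback, or NULL if no node matches.
+ */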
+static struct lpfc_nodelist *
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
+{
+	struct lpfc_nodelist *ndlp;
+
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (filter(ndlp, param))
+			return ndlp;
+	}
+	return NULL;
+}
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the RPI is
+ * found, it returns a pointer to the node list element; otherwise it
+ * returns NULL.
+ */
+struct lpfc_nodelist *
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
+}
+
+/*
+ * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
+ * found, it returns a pointer to the node list element; otherwise it
+ * returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the RPI
+ * is found, the routine returns a pointer to the node list element;
+ * otherwise it returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_nodelist *ndlp;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp = __lpfc_findnode_rpi(vport, rpi);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ *    NULL - No vport with the matching @vpi found
+ *    Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+	int i = 0;
+
+	/* The physical ports are always vpi 0 - translation is unnecessary. */
+	if (vpi > 0) {
+		/*
+		 * Translate the physical vpi to the logical vpi.  The
+		 * vport stores the logical vpi.
+		 */
+		for (i = 0; i < phba->max_vpi; i++) {
+			if (vpi == phba->vpi_ids[i])
+				break;
+		}
+
+		if (i >= phba->max_vpi) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+					 "2936 Could not find Vport mapped "
+					 "to vpi %d\n", vpi);
+			return NULL;
+		}
+	}
+
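+	/* At this point i holds the logical vpi (0 for the physical port);
+	 * search the HBA's vport list for a matching entry.
+	 */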
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport->vpi == i) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return vport;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return NULL;
+}
+
+void
+lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      uint32_t did)
+{
+	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+
+	lpfc_initialize_node(vport, ndlp, did);
+	INIT_LIST_HEAD(&ndlp->nlp_listp);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+		"node init:       did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
+	return;
+}
+
+/* This routine releases all resources associated with a specific NPort's
+ * ndlp and frees the nodelist back to its mempool.
+ */
+static void
+lpfc_nlp_release(struct kref *kref)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
+						  kref);
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node release:    did:x%x flg:x%x type:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			"0279 lpfc_nlp_release: ndlp:x%p "
+			"usgmap:x%x refcnt:%d\n",
+			(void *)ndlp, ndlp->nlp_usg_map,
+			atomic_read(&ndlp->kref.refcount));
+
+	/* remove ndlp from action. */
+	lpfc_nlp_remove(ndlp->vport, ndlp);
+
+	/* clear the ndlp active flag for all release cases */
+	phba = ndlp->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	NLP_CLR_NODE_ACT(ndlp);
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
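+	/* SLI4 RPIs are managed by the driver, so return this node's RPI
+	 * to the free pool.
+	 */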
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+
+	/* free ndlp memory for final ndlp release */
+	if (NLP_CHK_FREE_REQ(ndlp)) {
+		kfree(ndlp->lat_data);
+		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
+	}
+}
+
+/* This routine bumps the reference count for an ndlp structure to ensure
+ * that one discovery thread won't free an ndlp while another discovery
+ * thread is using it.
+ */
+struct lpfc_nodelist *
+lpfc_nlp_get(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
+	if (ndlp) {
+		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+			"node get:        did:x%x flg:x%x refcnt:x%x",
+			ndlp->nlp_DID, ndlp->nlp_flag,
+			atomic_read(&ndlp->kref.refcount));
+		/* Check the ndlp usage state to avoid incrementing the
+		 * reference count of an ndlp that is in the process of
+		 * being released.
+		 */
+		phba = ndlp->phba;
+		spin_lock_irqsave(&phba->ndlp_lock, flags);
+		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
+			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0276 lpfc_nlp_get: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+			return NULL;
+		} else
+			kref_get(&ndlp->kref);
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	}
+	return ndlp;
+}
+
+/* This routine decrements the reference count for an ndlp structure. If the
+ * count goes to 0, this indicates that the associated nodelist should be
+ * freed. Returning 1 indicates the ndlp resource has been released; on the
+ * other hand, returning 0 indicates the ndlp resource has not been released
+ * yet.
+ */
+int
+lpfc_nlp_put(struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba;
+	unsigned long flags;
+
+	if (!ndlp)
+		return 1;
+
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+	"node put:        did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		atomic_read(&ndlp->kref.refcount));
+	phba = ndlp->phba;
+	spin_lock_irqsave(&phba->ndlp_lock, flags);
+	/* Check the ndlp memory-free acknowledge flag to avoid a race
+	 * where kref_put is invoked again after a previous call has
+	 * already freed the ndlp memory.
+	 */
+	if (NLP_CHK_FREE_ACK(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0274 lpfc_nlp_put: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		return 1;
+	}
+	/* Check the ndlp inactivate-request flag to avoid a race
+	 * where kref_put is invoked again while the ndlp is already
+	 * being inactivated.
+	 */
+	if (NLP_CHK_IACT_REQ(ndlp)) {
+		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
+				"0275 lpfc_nlp_put: ndlp:x%p "
+				"usgmap:x%x refcnt:%d\n",
+				(void *)ndlp, ndlp->nlp_usg_map,
+				atomic_read(&ndlp->kref.refcount));
+		return 1;
+	}
+	/* For the last put, mark the ndlp usage flags so that no other
+	 * kref_get or kref_put on the same ndlp can interleave once the
+	 * final kref_put has been invoked on this ndlp.
+	 */
+	if (atomic_read(&ndlp->kref.refcount) == 1) {
+		/* Indicate ndlp is put to inactive state. */
+		NLP_SET_IACT_REQ(ndlp);
+		/* Acknowledge ndlp memory free has been seen. */
+		if (NLP_CHK_FREE_REQ(ndlp))
+			NLP_SET_FREE_ACK(ndlp);
+	}
+	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	/* Note: kref_put returns 1 when it is called with a reference
+	 * count of 1; it invokes the release callback but leaves the
+	 * count at 1 (the final decrement is not actually performed).
+	 * Otherwise it decrements the reference count and returns 0.
+	 */
+	return kref_put(&ndlp->kref, lpfc_nlp_release);
+}
+
+/* This routine frees the specified nodelist if it is not in use
+ * by any other discovery thread. This routine returns 1 if the
+ * ndlp has been freed. A return value of 0 indicates the ndlp has
+ * not yet been released.
+ */
+int
+lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
+{
+	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+		"node not used:   did:x%x flg:x%x refcnt:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag,
+		atomic_read(&ndlp->kref.refcount));
+	if (atomic_read(&ndlp->kref.refcount) == 1)
+		if (lpfc_nlp_put(ndlp))
+			return 1;
+	return 0;
+}
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterates through all FC nodes associated
+ * with all vports to check if there is any node with an
+ * fc_rport associated with it. If there is an fc_rport
+ * associated with the node, then the node is either in
+ * discovered state or its devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i, ret = 0;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	vports = lpfc_create_vport_work_array(phba);
+
+	/* If driver cannot allocate memory, indicate fcf is in use */
+	if (!vports)
+		return 1;
+
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+		shost = lpfc_shost_from_vport(vports[i]);
+		spin_lock_irq(shost->host_lock);
+		/*
+		 * If the CVL_RCVD bit is not set then we have sent the
+		 * FLOGI.
+		 * If dev_loss fires while we are waiting we do not want to
+		 * unregister the FCF.
+		 */
+		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
+			spin_unlock_irq(shost->host_lock);
+			ret =  1;
+			goto out;
+		}
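+		/* The FCF is still in use if any node has an FCP target
+		 * rport attached or still holds a registered RPI.
+		 */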
+		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+				ret = 1;
+				spin_unlock_irq(shost->host_lock);
+				goto out;
+			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+				ret = 1;
+				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+						"2624 RPI %x DID %x flag %x "
+						"still logged in\n",
+						ndlp->nlp_rpi, ndlp->nlp_DID,
+						ndlp->nlp_flag);
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+	}
+out:
+	lpfc_destroy_vport_work_array(phba, vports);
+	return ret;
+}
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2555 UNREG_VFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	spin_lock_irq(shost->host_lock);
+	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+	spin_unlock_irq(shost->host_lock);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+			"2550 UNREG_FCFI mbxStatus error x%x "
+			"HBA state x%x\n",
+			mboxq->u.mb.mbxStatus, vport->port_state);
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_unregister_fcf_prep - Unregister fcf record preparation
+ * @phba: Pointer to hba context object.
+ *
+ * This function prepares the HBA for unregistering the currently registered
+ * FCF. It unregisters, in order, the RPIs, VPIs, and VFIs.
+ */
+int
+lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	int i, rc;
+
+	/* Unregister RPIs */
+	if (lpfc_fcf_inuse(phba))
+		lpfc_unreg_hba_rpis(phba);
+
+	/* At this point, all discovery is aborted */
+	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+	/* Unregister VPIs */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			/* Stop FLOGI/FDISC retries */
+			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+			if (ndlp)
+				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+			lpfc_cleanup_pending_mbox(vports[i]);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vports[i]);
+			lpfc_mbx_unreg_vpi(vports[i]);
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Unregister the physical port VFI */
+	rc = lpfc_issue_unreg_vfi(phba->pport);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
+ * @phba: Pointer to hba context object.
+ *
+ * This function issues an unregister FCF mailbox command to the HBA to
+ * unregister the currently registered FCF record. The driver does not reset
+ * the driver FCF usage state flags.
+ *
+ * Return 0 if successfully issued, non-zero otherwise.
+ */
+int
+lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2551 UNREG_FCFI mbox allocation failed "
+				"HBA state x%x\n", phba->pport->port_state);
+		return -ENOMEM;
+	}
+	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
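+	/* lpfc_unregister_fcfi_cmpl frees the mailbox when the command
+	 * completes.
+	 */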
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2552 Unregister FCFI command failed rc x%x "
+				"HBA state x%x\n",
+				rc, phba->pport->port_state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
+ * @phba: Pointer to hba context object.
+ *
+ * This function unregisters the currently registered FCF. This function
+ * also tries to find another FCF for discovery by rescanning the HBA FCF
+ * table.
+ */
+void
+lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2748 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Reset HBA FCF states after successful unregister FCF */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
+	/*
+	 * If driver is not unloading, check if there is any other
+	 * FCF record that can be used for discovery.
+	 */
+	if ((phba->pport->load_flag & FC_UNLOADING) ||
+	    (phba->link_state < LPFC_LINK_UP))
+		return;
+
+	/* This is considered as the initial FCF discovery scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_INIT_DISC;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Reset FCF roundrobin bmask for new discovery */
+	lpfc_sli4_clear_fcf_rr_bmask(phba);
+
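+	/* Kick off a new FCF table scan starting from the first record. */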
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2553 lpfc_unregister_unused_fcf failed "
+				"to read FCF record HBA state x%x\n",
+				phba->pport->port_state);
+	}
+}
+
+/**
+ * lpfc_unregister_fcf - Unregister the currently registered fcf record
+ * @phba: Pointer to hba context object.
+ *
+ * This function just unregisters the currently registered FCF. It does not
+ * try to find another FCF for discovery.
+ */
+void
+lpfc_unregister_fcf(struct lpfc_hba *phba)
+{
+	int rc;
+
+	/* Preparation for unregistering fcf */
+	rc = lpfc_unregister_fcf_prep(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+				"2749 Failed to prepare for unregistering "
+				"HBA's FCF record: rc=%d\n", rc);
+		return;
+	}
+
+	/* Now, unregister FCF record and reset HBA FCF state */
+	rc = lpfc_sli4_unregister_fcf(phba);
+	if (rc)
+		return;
+	/* Set proper HBA FCF states after successful unregister FCF */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks if there are any connected remote ports for the FCF
+ * and, if all the devices are disconnected, unregisters the FCFI.
+ * This function also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+	/*
+	 * If HBA is not running in FIP mode, if HBA does not support
+	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
+	 * registered, do nothing.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
+	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
+	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
+	    (phba->pport->port_state == LPFC_FLOGI)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	if (lpfc_fcf_inuse(phba))
+		return;
+
+	lpfc_unregister_fcf_rescan(phba);
+}
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as in the config
+ *         region.
+ *
+ * This function creates the driver data structures for the FCF connection
+ * record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+	uint8_t *buff)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+	struct lpfc_fcf_conn_hdr *conn_hdr;
+	struct lpfc_fcf_conn_rec *conn_rec;
+	uint32_t record_count;
+	int i;
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
+		kfree(conn_entry);
+	}
+
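+	/* The header length field is in 32-bit words; convert it to the
+	 * number of fixed-size connection records that follow the header.
+	 */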
+	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+	record_count = conn_hdr->length * sizeof(uint32_t)/
+		sizeof(struct lpfc_fcf_conn_rec);
+
+	conn_rec = (struct lpfc_fcf_conn_rec *)
+		(buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+	for (i = 0; i < record_count; i++) {
+		if (!(conn_rec[i].flags & FCFCNCT_VALID))
+			continue;
+		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+			GFP_KERNEL);
+		if (!conn_entry) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2566 Failed to allocate connection"
+				" table entry\n");
+			return;
+		}
+
+		memcpy(&conn_entry->conn_rec, &conn_rec[i],
+			sizeof(struct lpfc_fcf_conn_rec));
+		conn_entry->conn_rec.vlan_tag =
+			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
+		conn_entry->conn_rec.flags =
+			le16_to_cpu(conn_entry->conn_rec.flags);
+		list_add_tail(&conn_entry->list,
+			&phba->fcf_conn_rec_list);
+	}
+}
+
+/**
+ * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ *  This function updates the driver data structure with config
+ *  parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+			uint8_t *buff)
+{
+	struct lpfc_fip_param_hdr *fcoe_param_hdr;
+	struct lpfc_fcoe_params *fcoe_param;
+
+	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
+		buff;
+	fcoe_param = (struct lpfc_fcoe_params *)
+		(buff + sizeof(struct lpfc_fip_param_hdr));
+
+	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+		return;
+
+	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+		phba->valid_vlan = 1;
+		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+			0xFFF;
+	}
+
+	phba->fc_map[0] = fcoe_param->fc_map[0];
+	phba->fc_map[1] = fcoe_param->fc_map[1];
+	phba->fc_map[2] = fcoe_param->fc_map[2];
+	return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches config region data to find the beginning
+ * of the record specified by rec_type. If the record is found, this
+ * function returns a pointer to the record; otherwise it returns NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+	uint32_t offset = 0, rec_length;
+
+	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+		(size < sizeof(uint32_t)))
+		return NULL;
+
+	rec_length = buff[offset + 1];
+
+	/*
+	 * Each TLV record has a one-word header followed by the number of
+	 * data words specified in the rec_length field of the record header.
+	 */
+	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+		<= size) {
+		if (buff[offset] == rec_type)
+			return &buff[offset];
+
+		if (buff[offset] == LPFC_REGION23_LAST_REC)
+			return NULL;
+
+		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+		rec_length = buff[offset + 1];
+	}
+	return NULL;
+}
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populates the driver data structures with the parameters.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+		uint8_t *buff,
+		uint32_t size)
+{
+	uint32_t offset = 0, rec_length;
+	uint8_t *rec_ptr;
+
+	/*
+	 * If the data size is less than 2 words, the signature and version
+	 * cannot be verified.
+	 */
+	if (size < 2*sizeof(uint32_t))
+		return;
+
+	/* Check the region signature first */
+	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2567 Config region 23 has bad signature\n");
+		return;
+	}
+
+	offset += 4;
+
+	/* Check the data structure version */
+	if (buff[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2568 Config region 23 has bad version\n");
+		return;
+	}
+	offset += 4;
+
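+	/* The remainder of the region is a sequence of TLV records;
+	 * locate the FCoE parameter and FCF connection table records.
+	 */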
+	rec_length = buff[offset + 1];
+
+	/* Read FCoE param record */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+			size - offset, FCOE_PARAM_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcoe_param(phba, rec_ptr);
+
+	/* Read FCF connection table */
+	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+		size - offset, FCOE_CONN_TBL_TYPE);
+	if (rec_ptr)
+		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw.h
new file mode 100644
index 0000000..5f280b5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw.h
@@ -0,0 +1,3835 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define FDMI_DID        0xfffffaU
+#define NameServer_DID  0xfffffcU
+#define SCR_DID         0xfffffdU
+#define Fabric_DID      0xfffffeU
+#define Bcast_DID       0xffffffU
+#define Mask_DID        0xffffffU
+#define CT_DID_MASK     0xffff00U
+#define Fabric_DID_MASK 0xfff000U
+#define WELL_KNOWN_DID_MASK 0xfffff0U
+
+#define PT2PT_LocalID	1
+#define PT2PT_RemoteID	2
+
+#define FF_DEF_EDTOV          2000	/* Default E_D_TOV (2000ms) */
+#define FF_DEF_ALTOV            15	/* Default AL_TIME (15ms) */
+#define FF_DEF_RATOV             2	/* Default RA_TOV (2s) */
+#define FF_DEF_ARBTOV         1900	/* Default ARB_TOV (1900ms) */
+
+#define LPFC_BUF_RING0        64	/* Number of buffers to post to RING
+					   0 */
+
+#define FCELSSIZE             1024	/* maximum ELS transfer size */
+
+#define LPFC_FCP_RING            0	/* ring 0 for FCP initiator commands */
+#define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */
+#define LPFC_ELS_RING            2	/* ring 2 for ELS commands */
+#define LPFC_FCP_NEXT_RING       3
+
+#define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */
+#define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES      4	/* SLI-2 extra command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES      4	/* SLI-2 extra response ring entries */
+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36	/* SLI-2 extra FCP cmd ring entries */
+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52	/* SLI-2 extra FCP rsp ring entries */
+#define SLI2_IOCB_CMD_R2_ENTRIES     20	/* SLI-2 ELS command ring entries */
+#define SLI2_IOCB_RSP_R2_ENTRIES     20	/* SLI-2 ELS response ring entries */
+#define SLI2_IOCB_CMD_R3_ENTRIES      0
+#define SLI2_IOCB_RSP_R3_ENTRIES      0
+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+
+#define SLI2_IOCB_CMD_SIZE	32
+#define SLI2_IOCB_RSP_SIZE	32
+#define SLI3_IOCB_CMD_SIZE	128
+#define SLI3_IOCB_RSP_SIZE	64
+
+#define LPFC_UNREG_ALL_RPIS_VPORT	0xffff
+#define LPFC_UNREG_ALL_DFLT_RPIS	0xffffffff
+
+/* vendor ID used in SCSI netlink calls */
+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+
+#define FW_REV_STR_SIZE	32
+/* Common Transport structures and definitions */
+
+union CtRevisionId {
+	/* Structure is in Big Endian format */
+	struct {
+		uint32_t Revision:8;
+		uint32_t InId:24;
+	} bits;
+	uint32_t word;
+};
+
+union CtCommandResponse {
+	/* Structure is in Big Endian format */
+	struct {
+		uint32_t CmdRsp:16;
+		uint32_t Size:16;
+	} bits;
+	uint32_t word;
+};
+
+#define FC4_FEATURE_INIT 0x2
+#define FC4_FEATURE_TARGET 0x1
+
+struct lpfc_sli_ct_request {
+	/* Structure is in Big Endian format */
+	union CtRevisionId RevisionId;
+	uint8_t FsType;
+	uint8_t FsSubType;
+	uint8_t Options;
+	uint8_t Rsrvd1;
+	union CtCommandResponse CommandResponse;
+	uint8_t Rsrvd2;
+	uint8_t ReasonCode;
+	uint8_t Explanation;
+	uint8_t VendorUnique;
+
+	union {
+		uint32_t PortID;
+		struct gid {
+			uint8_t PortType;	/* for GID_PT requests */
+			uint8_t DomainScope;
+			uint8_t AreaScope;
+			uint8_t Fc4Type;	/* for GID_FT requests */
+		} gid;
+		struct rft {
+			uint32_t PortId;	/* For RFT_ID requests */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t rsvd0:16;
+			uint32_t rsvd1:7;
+			uint32_t fcpReg:1;	/* Type 8 */
+			uint32_t rsvd2:2;
+			uint32_t ipReg:1;	/* Type 5 */
+			uint32_t rsvd3:5;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t rsvd0:16;
+			uint32_t fcpReg:1;	/* Type 8 */
+			uint32_t rsvd1:7;
+			uint32_t rsvd3:5;
+			uint32_t ipReg:1;	/* Type 5 */
+			uint32_t rsvd2:2;
+#endif
+
+			uint32_t rsvd[7];
+		} rft;
+		struct rnn {
+			uint32_t PortId;	/* For RNN_ID requests */
+			uint8_t wwnn[8];
+		} rnn;
+		struct rsnn {	/* For RSNN_ID requests */
+			uint8_t wwnn[8];
+			uint8_t len;
+			uint8_t symbname[255];
+		} rsnn;
+		struct da_id { /* For DA_ID requests */
+			uint32_t port_id;
+		} da_id;
+		struct rspn {	/* For RSPN_ID requests */
+			uint32_t PortId;
+			uint8_t len;
+			uint8_t symbname[255];
+		} rspn;
+		struct gff {
+			uint32_t PortId;
+		} gff;
+		struct gff_acc {
+			uint8_t fbits[128];
+		} gff_acc;
+#define FCP_TYPE_FEATURE_OFFSET 7
+		struct rff {
+			uint32_t PortId;
+			uint8_t reserved[2];
+			uint8_t fbits;
+			uint8_t type_code;     /* type=8 for FCP */
+		} rff;
+	} un;
+};
+
+#define  SLI_CT_REVISION        1
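+/* CT request sizes: the common CT header (everything before the union)
+ * plus the size of the command-specific payload structure.
+ */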
+#define  GID_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gid))
+#define  GFF_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct gff))
+#define  RFT_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rft))
+#define  RFF_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rff))
+#define  RNN_REQUEST_SZ   (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rnn))
+#define  RSNN_REQUEST_SZ  (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rsnn))
+#define DA_ID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+			  sizeof(struct da_id))
+#define  RSPN_REQUEST_SZ  (offsetof(struct lpfc_sli_ct_request, un) + \
+			   sizeof(struct rspn))
+
+/*
+ * FsType Definitions
+ */
+
+#define  SLI_CT_MANAGEMENT_SERVICE        0xFA
+#define  SLI_CT_TIME_SERVICE              0xFB
+#define  SLI_CT_DIRECTORY_SERVICE         0xFC
+#define  SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
+
+/*
+ * Directory Service Subtypes
+ */
+
+#define  SLI_CT_DIRECTORY_NAME_SERVER     0x02
+
+/*
+ * Response Codes
+ */
+
+#define  SLI_CT_RESPONSE_FS_RJT           0x8001
+#define  SLI_CT_RESPONSE_FS_ACC           0x8002
+
+/*
+ * Reason Codes
+ */
+
+#define  SLI_CT_NO_ADDITIONAL_EXPL	  0x0
+#define  SLI_CT_INVALID_COMMAND           0x01
+#define  SLI_CT_INVALID_VERSION           0x02
+#define  SLI_CT_LOGICAL_ERROR             0x03
+#define  SLI_CT_INVALID_IU_SIZE           0x04
+#define  SLI_CT_LOGICAL_BUSY              0x05
+#define  SLI_CT_PROTOCOL_ERROR            0x07
+#define  SLI_CT_UNABLE_TO_PERFORM_REQ     0x09
+#define  SLI_CT_REQ_NOT_SUPPORTED         0x0b
+#define  SLI_CT_HBA_INFO_NOT_REGISTERED	  0x10
+#define  SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE  0x11
+#define  SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN      0x12
+#define  SLI_CT_HBA_ATTR_NOT_PRESENT	  0x13
+#define  SLI_CT_PORT_INFO_NOT_REGISTERED  0x20
+#define  SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
+#define  SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN     0x22
+#define  SLI_CT_VENDOR_UNIQUE             0xff
+
+/*
+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
+ */
+
+#define  SLI_CT_NO_PORT_ID                0x01
+#define  SLI_CT_NO_PORT_NAME              0x02
+#define  SLI_CT_NO_NODE_NAME              0x03
+#define  SLI_CT_NO_CLASS_OF_SERVICE       0x04
+#define  SLI_CT_NO_IP_ADDRESS             0x05
+#define  SLI_CT_NO_IPA                    0x06
+#define  SLI_CT_NO_FC4_TYPES              0x07
+#define  SLI_CT_NO_SYMBOLIC_PORT_NAME     0x08
+#define  SLI_CT_NO_SYMBOLIC_NODE_NAME     0x09
+#define  SLI_CT_NO_PORT_TYPE              0x0A
+#define  SLI_CT_ACCESS_DENIED             0x10
+#define  SLI_CT_INVALID_PORT_ID           0x11
+#define  SLI_CT_DATABASE_EMPTY            0x12
+
+/*
+ * Name Server Command Codes
+ */
+
+#define  SLI_CTNS_GA_NXT      0x0100
+#define  SLI_CTNS_GPN_ID      0x0112
+#define  SLI_CTNS_GNN_ID      0x0113
+#define  SLI_CTNS_GCS_ID      0x0114
+#define  SLI_CTNS_GFT_ID      0x0117
+#define  SLI_CTNS_GSPN_ID     0x0118
+#define  SLI_CTNS_GPT_ID      0x011A
+#define  SLI_CTNS_GFF_ID      0x011F
+#define  SLI_CTNS_GID_PN      0x0121
+#define  SLI_CTNS_GID_NN      0x0131
+#define  SLI_CTNS_GIP_NN      0x0135
+#define  SLI_CTNS_GIPA_NN     0x0136
+#define  SLI_CTNS_GSNN_NN     0x0139
+#define  SLI_CTNS_GNN_IP      0x0153
+#define  SLI_CTNS_GIPA_IP     0x0156
+#define  SLI_CTNS_GID_FT      0x0171
+#define  SLI_CTNS_GID_PT      0x01A1
+#define  SLI_CTNS_RPN_ID      0x0212
+#define  SLI_CTNS_RNN_ID      0x0213
+#define  SLI_CTNS_RCS_ID      0x0214
+#define  SLI_CTNS_RFT_ID      0x0217
+#define  SLI_CTNS_RSPN_ID     0x0218
+#define  SLI_CTNS_RPT_ID      0x021A
+#define  SLI_CTNS_RFF_ID      0x021F
+#define  SLI_CTNS_RIP_NN      0x0235
+#define  SLI_CTNS_RIPA_NN     0x0236
+#define  SLI_CTNS_RSNN_NN     0x0239
+#define  SLI_CTNS_DA_ID       0x0300
+
+/*
+ * Port Types
+ */
+
+#define  SLI_CTPT_N_PORT      0x01
+#define  SLI_CTPT_NL_PORT     0x02
+#define  SLI_CTPT_FNL_PORT    0x03
+#define  SLI_CTPT_IP          0x04
+#define  SLI_CTPT_FCP         0x08
+#define  SLI_CTPT_NX_PORT     0x7F
+#define  SLI_CTPT_F_PORT      0x81
+#define  SLI_CTPT_FL_PORT     0x82
+#define  SLI_CTPT_E_PORT      0x84
+
+#define SLI_CT_LAST_ENTRY     0x80000000
+
+/* Fibre Channel Service Parameter definitions */
+
+#define FC_PH_4_0   6		/* FC-PH version 4.0 */
+#define FC_PH_4_1   7		/* FC-PH version 4.1 */
+#define FC_PH_4_2   8		/* FC-PH version 4.2 */
+#define FC_PH_4_3   9		/* FC-PH version 4.3 */
+
+#define FC_PH_LOW   8		/* Lowest supported FC-PH version */
+#define FC_PH_HIGH  9		/* Highest supported FC-PH version */
+#define FC_PH3   0x20		/* FC-PH-3 version */
+
+#define FF_FRAME_SIZE     2048
+
+struct lpfc_name {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t nameType:4;	/* FC Word 0, bit 28:31 */
+			uint8_t IEEEextMsn:4;	/* FC Word 0, bit 24:27, bit
+						   8:11 of IEEE ext */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint8_t IEEEextMsn:4;	/* FC Word 0, bit 24:27, bit
+						   8:11 of IEEE ext */
+			uint8_t nameType:4;	/* FC Word 0, bit 28:31 */
+#endif
+
+#define NAME_IEEE           0x1	/* IEEE name - nameType */
+#define NAME_IEEE_EXT       0x2	/* IEEE extended name */
+#define NAME_FC_TYPE        0x3	/* FC native name type */
+#define NAME_IP_TYPE        0x4	/* IP address */
+#define NAME_CCITT_TYPE     0xC
+#define NAME_CCITT_GR_TYPE  0xE
+			uint8_t IEEEextLsb;	/* FC Word 0, bit 16:23, IEEE
+						   extended Lsb */
+			uint8_t IEEE[6];	/* FC IEEE address */
+		} s;
+		uint8_t wwn[8];
+	} u;
+};
+
+struct csp {
+	uint8_t fcphHigh;	/* FC Word 0, byte 0 */
+	uint8_t fcphLow;
+	uint8_t bbCreditMsb;
+	uint8_t bbCreditlsb;	/* FC Word 0, byte 3 */
+
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
+	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
+	uint16_t response_multiple_NPort:1;	/* FC Word 1, bit 29 */
+	uint16_t fPort:1;	/* FC Word 1, bit 28 */
+	uint16_t altBbCredit:1;	/* FC Word 1, bit 27 */
+	uint16_t edtovResolution:1;	/* FC Word 1, bit 26 */
+	uint16_t multicast:1;	/* FC Word 1, bit 25 */
+	uint16_t broadcast:1;	/* FC Word 1, bit 24 */
+
+	uint16_t huntgroup:1;	/* FC Word 1, bit 23 */
+	uint16_t simplex:1;	/* FC Word 1, bit 22 */
+	uint16_t word1Reserved1:3;	/* FC Word 1, bit 21:19 */
+	uint16_t dhd:1;		/* FC Word 1, bit 18 */
+	uint16_t contIncSeqCnt:1;	/* FC Word 1, bit 17 */
+	uint16_t payloadlength:1;	/* FC Word 1, bit 16 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t broadcast:1;	/* FC Word 1, bit 24 */
+	uint16_t multicast:1;	/* FC Word 1, bit 25 */
+	uint16_t edtovResolution:1;	/* FC Word 1, bit 26 */
+	uint16_t altBbCredit:1;	/* FC Word 1, bit 27 */
+	uint16_t fPort:1;	/* FC Word 1, bit 28 */
+	uint16_t response_multiple_NPort:1;	/* FC Word 1, bit 29 */
+	uint16_t randomOffset:1;	/* FC Word 1, bit 30 */
+	uint16_t request_multiple_Nport:1;	/* FC Word 1, bit 31 */
+
+	uint16_t payloadlength:1;	/* FC Word 1, bit 16 */
+	uint16_t contIncSeqCnt:1;	/* FC Word 1, bit 17 */
+	uint16_t dhd:1;		/* FC Word 1, bit 18 */
+	uint16_t word1Reserved1:3;	/* FC Word 1, bit 21:19 */
+	uint16_t simplex:1;	/* FC Word 1, bit 22 */
+	uint16_t huntgroup:1;	/* FC Word 1, bit 23 */
+#endif
+
+	uint8_t bbRcvSizeMsb;	/* Upper nibble is reserved */
+	uint8_t bbRcvSizeLsb;	/* FC Word 1, byte 3 */
+	union {
+		struct {
+			uint8_t word2Reserved1;	/* FC Word 2 byte 0 */
+
+			uint8_t totalConcurrSeq;	/* FC Word 2 byte 1 */
+			uint8_t roByCategoryMsb;	/* FC Word 2 byte 2 */
+
+			uint8_t roByCategoryLsb;	/* FC Word 2 byte 3 */
+		} nPort;
+		uint32_t r_a_tov;	/* R_A_TOV must be in B.E. format */
+	} w2;
+
+	uint32_t e_d_tov;	/* E_D_TOV must be in B.E. format */
+};
+
+struct class_parms {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t classValid:1;	/* FC Word 0, bit 31 */
+	uint8_t intermix:1;	/* FC Word 0, bit 30 */
+	uint8_t stackedXparent:1;	/* FC Word 0, bit 29 */
+	uint8_t stackedLockDown:1;	/* FC Word 0, bit 28 */
+	uint8_t seqDelivery:1;	/* FC Word 0, bit 27 */
+	uint8_t word0Reserved1:3;	/* FC Word 0, bit 24:26 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t word0Reserved1:3;	/* FC Word 0, bit 24:26 */
+	uint8_t seqDelivery:1;	/* FC Word 0, bit 27 */
+	uint8_t stackedLockDown:1;	/* FC Word 0, bit 28 */
+	uint8_t stackedXparent:1;	/* FC Word 0, bit 29 */
+	uint8_t intermix:1;	/* FC Word 0, bit 30 */
+	uint8_t classValid:1;	/* FC Word 0, bit 31 */
+
+#endif
+
+	uint8_t word0Reserved2;	/* FC Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t iCtlXidReAssgn:2;	/* FC Word 0, Bit 14:15 */
+	uint8_t iCtlInitialPa:2;	/* FC Word 0, bit 12:13 */
+	uint8_t iCtlAck0capable:1;	/* FC Word 0, bit 11 */
+	uint8_t iCtlAckNcapable:1;	/* FC Word 0, bit 10 */
+	uint8_t word0Reserved3:2;	/* FC Word 0, bit  8: 9 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t word0Reserved3:2;	/* FC Word 0, bit  8: 9 */
+	uint8_t iCtlAckNcapable:1;	/* FC Word 0, bit 10 */
+	uint8_t iCtlAck0capable:1;	/* FC Word 0, bit 11 */
+	uint8_t iCtlInitialPa:2;	/* FC Word 0, bit 12:13 */
+	uint8_t iCtlXidReAssgn:2;	/* FC Word 0, Bit 14:15 */
+#endif
+
+	uint8_t word0Reserved4;	/* FC Word 0, bit  0: 7 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t rCtlAck0capable:1;	/* FC Word 1, bit 31 */
+	uint8_t rCtlAckNcapable:1;	/* FC Word 1, bit 30 */
+	uint8_t rCtlXidInterlck:1;	/* FC Word 1, bit 29 */
+	uint8_t rCtlErrorPolicy:2;	/* FC Word 1, bit 27:28 */
+	uint8_t word1Reserved1:1;	/* FC Word 1, bit 26 */
+	uint8_t rCtlCatPerSeq:2;	/* FC Word 1, bit 24:25 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t rCtlCatPerSeq:2;	/* FC Word 1, bit 24:25 */
+	uint8_t word1Reserved1:1;	/* FC Word 1, bit 26 */
+	uint8_t rCtlErrorPolicy:2;	/* FC Word 1, bit 27:28 */
+	uint8_t rCtlXidInterlck:1;	/* FC Word 1, bit 29 */
+	uint8_t rCtlAckNcapable:1;	/* FC Word 1, bit 30 */
+	uint8_t rCtlAck0capable:1;	/* FC Word 1, bit 31 */
+#endif
+
+	uint8_t word1Reserved2;	/* FC Word 1, bit 16:23 */
+	uint8_t rcvDataSizeMsb;	/* FC Word 1, bit  8:15 */
+	uint8_t rcvDataSizeLsb;	/* FC Word 1, bit  0: 7 */
+
+	uint8_t concurrentSeqMsb;	/* FC Word 2, bit 24:31 */
+	uint8_t concurrentSeqLsb;	/* FC Word 2, bit 16:23 */
+	uint8_t EeCreditSeqMsb;	/* FC Word 2, bit  8:15 */
+	uint8_t EeCreditSeqLsb;	/* FC Word 2, bit  0: 7 */
+
+	uint8_t openSeqPerXchgMsb;	/* FC Word 3, bit 24:31 */
+	uint8_t openSeqPerXchgLsb;	/* FC Word 3, bit 16:23 */
+	uint8_t word3Reserved1;	/* Fc Word 3, bit  8:15 */
+	uint8_t word3Reserved2;	/* Fc Word 3, bit  0: 7 */
+};
+
+struct serv_parm {	/* Structure is in Big Endian format */
+	struct csp cmn;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	struct class_parms cls1;
+	struct class_parms cls2;
+	struct class_parms cls3;
+	struct class_parms cls4;
+	uint8_t vendorVersion[16];
+};
+
+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+	 uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT		24
+#define fc_vft_hdr_r_ctl_MASK		0xFF
+#define fc_vft_hdr_r_ctl_WORD		word0
+#define fc_vft_hdr_ver_SHIFT		22
+#define fc_vft_hdr_ver_MASK		0x3
+#define fc_vft_hdr_ver_WORD		word0
+#define fc_vft_hdr_type_SHIFT		18
+#define fc_vft_hdr_type_MASK		0xF
+#define fc_vft_hdr_type_WORD		word0
+#define fc_vft_hdr_e_SHIFT		16
+#define fc_vft_hdr_e_MASK		0x1
+#define fc_vft_hdr_e_WORD		word0
+#define fc_vft_hdr_priority_SHIFT	13
+#define fc_vft_hdr_priority_MASK	0x7
+#define fc_vft_hdr_priority_WORD	word0
+#define fc_vft_hdr_vf_id_SHIFT		1
+#define fc_vft_hdr_vf_id_MASK		0xFFF
+#define fc_vft_hdr_vf_id_WORD		word0
+	uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT		24
+#define fc_vft_hdr_hopct_MASK		0xFF
+#define fc_vft_hdr_hopct_WORD		word1
+};
+
+/*
+ *  Extended Link Service LS_COMMAND codes (Payload Word 0)
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define ELS_CMD_MASK      0xffff0000
+#define ELS_RSP_MASK      0xff000000
+#define ELS_CMD_LS_RJT    0x01000000
+#define ELS_CMD_ACC       0x02000000
+#define ELS_CMD_PLOGI     0x03000000
+#define ELS_CMD_FLOGI     0x04000000
+#define ELS_CMD_LOGO      0x05000000
+#define ELS_CMD_ABTX      0x06000000
+#define ELS_CMD_RCS       0x07000000
+#define ELS_CMD_RES       0x08000000
+#define ELS_CMD_RSS       0x09000000
+#define ELS_CMD_RSI       0x0A000000
+#define ELS_CMD_ESTS      0x0B000000
+#define ELS_CMD_ESTC      0x0C000000
+#define ELS_CMD_ADVC      0x0D000000
+#define ELS_CMD_RTV       0x0E000000
+#define ELS_CMD_RLS       0x0F000000
+#define ELS_CMD_ECHO      0x10000000
+#define ELS_CMD_TEST      0x11000000
+#define ELS_CMD_RRQ       0x12000000
+#define ELS_CMD_PRLI      0x20100014
+#define ELS_CMD_PRLO      0x21100014
+#define ELS_CMD_PRLO_ACC  0x02100014
+#define ELS_CMD_PDISC     0x50000000
+#define ELS_CMD_FDISC     0x51000000
+#define ELS_CMD_ADISC     0x52000000
+#define ELS_CMD_FARP      0x54000000
+#define ELS_CMD_FARPR     0x55000000
+#define ELS_CMD_RPS       0x56000000
+#define ELS_CMD_RPL       0x57000000
+#define ELS_CMD_FAN       0x60000000
+#define ELS_CMD_RSCN      0x61040000
+#define ELS_CMD_SCR       0x62000000
+#define ELS_CMD_RNID      0x78000000
+#define ELS_CMD_LIRR      0x7A000000
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
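+/*
+ * On little-endian hosts the values below are the byte-swapped forms of
+ * the big-endian command codes above, so the first ELS payload word can
+ * be compared directly without byte swapping.
+ */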
+#define ELS_CMD_MASK      0xffff
+#define ELS_RSP_MASK      0xff
+#define ELS_CMD_LS_RJT    0x01
+#define ELS_CMD_ACC       0x02
+#define ELS_CMD_PLOGI     0x03
+#define ELS_CMD_FLOGI     0x04
+#define ELS_CMD_LOGO      0x05
+#define ELS_CMD_ABTX      0x06
+#define ELS_CMD_RCS       0x07
+#define ELS_CMD_RES       0x08
+#define ELS_CMD_RSS       0x09
+#define ELS_CMD_RSI       0x0A
+#define ELS_CMD_ESTS      0x0B
+#define ELS_CMD_ESTC      0x0C
+#define ELS_CMD_ADVC      0x0D
+#define ELS_CMD_RTV       0x0E
+#define ELS_CMD_RLS       0x0F
+#define ELS_CMD_ECHO      0x10
+#define ELS_CMD_TEST      0x11
+#define ELS_CMD_RRQ       0x12
+#define ELS_CMD_PRLI      0x14001020
+#define ELS_CMD_PRLO      0x14001021
+#define ELS_CMD_PRLO_ACC  0x14001002
+#define ELS_CMD_PDISC     0x50
+#define ELS_CMD_FDISC     0x51
+#define ELS_CMD_ADISC     0x52
+#define ELS_CMD_FARP      0x54
+#define ELS_CMD_FARPR     0x55
+#define ELS_CMD_RPS       0x56
+#define ELS_CMD_RPL       0x57
+#define ELS_CMD_FAN       0x60
+#define ELS_CMD_RSCN      0x0461
+#define ELS_CMD_SCR       0x62
+#define ELS_CMD_RNID      0x78
+#define ELS_CMD_LIRR      0x7A
+#endif
+
+/*
+ *  LS_RJT Payload Definition
+ */
+
+struct ls_rjt {	/* Structure is in Big Endian format */
+	union {
+		uint32_t lsRjtError;
+		struct {
+			uint8_t lsRjtRsvd0;	/* FC Word 0, bit 24:31 */
+
+			uint8_t lsRjtRsnCode;	/* FC Word 0, bit 16:23 */
+			/* LS_RJT reason codes */
+#define LSRJT_INVALID_CMD     0x01
+#define LSRJT_LOGICAL_ERR     0x03
+#define LSRJT_LOGICAL_BSY     0x05
+#define LSRJT_PROTOCOL_ERR    0x07
+#define LSRJT_UNABLE_TPC      0x09	/* Unable to perform command */
+#define LSRJT_CMD_UNSUPPORTED 0x0B
+#define LSRJT_VENDOR_UNIQUE   0xFF	/* See Byte 3 */
+
+			uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
+			/* LS_RJT reason explanation */
+#define LSEXP_NOTHING_MORE      0x00
+#define LSEXP_SPARM_OPTIONS     0x01
+#define LSEXP_SPARM_ICTL        0x03
+#define LSEXP_SPARM_RCTL        0x05
+#define LSEXP_SPARM_RCV_SIZE    0x07
+#define LSEXP_SPARM_CONCUR_SEQ  0x09
+#define LSEXP_SPARM_CREDIT      0x0B
+#define LSEXP_INVALID_PNAME     0x0D
+#define LSEXP_INVALID_NNAME     0x0E
+#define LSEXP_INVALID_CSP       0x0F
+#define LSEXP_INVALID_ASSOC_HDR 0x11
+#define LSEXP_ASSOC_HDR_REQ     0x13
+#define LSEXP_INVALID_O_SID     0x15
+#define LSEXP_INVALID_OX_RX     0x17
+#define LSEXP_CMD_IN_PROGRESS   0x19
+#define LSEXP_PORT_LOGIN_REQ    0x1E
+#define LSEXP_INVALID_NPORT_ID  0x1F
+#define LSEXP_INVALID_SEQ_ID    0x21
+#define LSEXP_INVALID_XCHG      0x23
+#define LSEXP_INACTIVE_XCHG     0x25
+#define LSEXP_RQ_REQUIRED       0x27
+#define LSEXP_OUT_OF_RESOURCE   0x29
+#define LSEXP_CANT_GIVE_DATA    0x2A
+#define LSEXP_REQ_UNSUPPORTED   0x2C
+			uint8_t vendorUnique;	/* FC Word 0, bit  0: 7 */
+		} b;
+	} un;
+};
+
+/*
+ *  N_Port Logout (FLOGO/PLOGO Request) Payload Definition
+ */
+
+typedef struct _LOGO {		/* Structure is in Big Endian format */
+	union {
+		uint32_t nPortId32;	/* Access nPortId as a word */
+		struct {
+			uint8_t word1Reserved1;	/* FC Word 1, bit 31:24 */
+			uint8_t nPortIdByte0;	/* N_port  ID bit 16:23 */
+			uint8_t nPortIdByte1;	/* N_port  ID bit  8:15 */
+			uint8_t nPortIdByte2;	/* N_port  ID bit  0: 7 */
+		} b;
+	} un;
+	struct lpfc_name portName;	/* N_port name field */
+} LOGO;
+
+/*
+ *  FCP Login (PRLI Request / ACC) Payload Definition
+ */
+
+#define PRLX_PAGE_LEN   0x10
+#define TPRLO_PAGE_LEN  0x14
+
+typedef struct _PRLI {		/* Structure is in Big Endian format */
+	uint8_t prliType;	/* FC Parm Word 0, bit 24:31 */
+
+#define PRLI_FCP_TYPE 0x08
+	uint8_t word0Reserved1;	/* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t estabImagePair:1;	/* FC Parm Word 0, bit 13 */
+
+	/*    ACC = imagePairEstablished */
+	uint8_t word0Reserved2:1;	/* FC Parm Word 0, bit 12 */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+	uint8_t word0Reserved2:1;	/* FC Parm Word 0, bit 12 */
+	uint8_t estabImagePair:1;	/* FC Parm Word 0, bit 13 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	/*    ACC = imagePairEstablished */
+#endif
+
+#define PRLI_REQ_EXECUTED     0x1	/* acceptRspCode */
+#define PRLI_NO_RESOURCES     0x2
+#define PRLI_INIT_INCOMPLETE  0x3
+#define PRLI_NO_SUCH_PA       0x4
+#define PRLI_PREDEF_CONFIG    0x5
+#define PRLI_PARTIAL_SUCCESS  0x6
+#define PRLI_INVALID_PAGE_CNT 0x7
+	uint8_t word0Reserved3;	/* FC Parm Word 0, bit 0:7 */
+
+	uint32_t origProcAssoc;	/* FC Parm Word 1, bit 0:31 */
+
+	uint32_t respProcAssoc;	/* FC Parm Word 2, bit 0:31 */
+
+	uint8_t word3Reserved1;	/* FC Parm Word 3, bit 24:31 */
+	uint8_t word3Reserved2;	/* FC Parm Word 3, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t Word3bit15Resved:1;	/* FC Parm Word 3, bit 15 */
+	uint16_t Word3bit14Resved:1;	/* FC Parm Word 3, bit 14 */
+	uint16_t Word3bit13Resved:1;	/* FC Parm Word 3, bit 13 */
+	uint16_t Word3bit12Resved:1;	/* FC Parm Word 3, bit 12 */
+	uint16_t Word3bit11Resved:1;	/* FC Parm Word 3, bit 11 */
+	uint16_t Word3bit10Resved:1;	/* FC Parm Word 3, bit 10 */
+	uint16_t TaskRetryIdReq:1;	/* FC Parm Word 3, bit  9 */
+	uint16_t Retry:1;	/* FC Parm Word 3, bit  8 */
+	uint16_t ConfmComplAllowed:1;	/* FC Parm Word 3, bit  7 */
+	uint16_t dataOverLay:1;	/* FC Parm Word 3, bit  6 */
+	uint16_t initiatorFunc:1;	/* FC Parm Word 3, bit  5 */
+	uint16_t targetFunc:1;	/* FC Parm Word 3, bit  4 */
+	uint16_t cmdDataMixEna:1;	/* FC Parm Word 3, bit  3 */
+	uint16_t dataRspMixEna:1;	/* FC Parm Word 3, bit  2 */
+	uint16_t readXferRdyDis:1;	/* FC Parm Word 3, bit  1 */
+	uint16_t writeXferRdyDis:1;	/* FC Parm Word 3, bit  0 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t Retry:1;	/* FC Parm Word 3, bit  8 */
+	uint16_t TaskRetryIdReq:1;	/* FC Parm Word 3, bit  9 */
+	uint16_t Word3bit10Resved:1;	/* FC Parm Word 3, bit 10 */
+	uint16_t Word3bit11Resved:1;	/* FC Parm Word 3, bit 11 */
+	uint16_t Word3bit12Resved:1;	/* FC Parm Word 3, bit 12 */
+	uint16_t Word3bit13Resved:1;	/* FC Parm Word 3, bit 13 */
+	uint16_t Word3bit14Resved:1;	/* FC Parm Word 3, bit 14 */
+	uint16_t Word3bit15Resved:1;	/* FC Parm Word 3, bit 15 */
+	uint16_t writeXferRdyDis:1;	/* FC Parm Word 3, bit  0 */
+	uint16_t readXferRdyDis:1;	/* FC Parm Word 3, bit  1 */
+	uint16_t dataRspMixEna:1;	/* FC Parm Word 3, bit  2 */
+	uint16_t cmdDataMixEna:1;	/* FC Parm Word 3, bit  3 */
+	uint16_t targetFunc:1;	/* FC Parm Word 3, bit  4 */
+	uint16_t initiatorFunc:1;	/* FC Parm Word 3, bit  5 */
+	uint16_t dataOverLay:1;	/* FC Parm Word 3, bit  6 */
+	uint16_t ConfmComplAllowed:1;	/* FC Parm Word 3, bit  7 */
+#endif
+} PRLI;
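+
+/*
+ * Illustrative sketch only: a minimal FCP PRLI request payload for an
+ * initiator that uses no process associators.  The helper name is
+ * hypothetical; only the PRLI fields defined above are used.
+ */
+#if 0
+static void example_fill_prli(PRLI *npr)
+{
+	memset(npr, 0, sizeof(*npr));
+	npr->prliType = PRLI_FCP_TYPE;	/* FCP service parameter page */
+	npr->estabImagePair = 1;	/* ask to establish an image pair */
+	npr->initiatorFunc = 1;		/* act as a SCSI initiator */
+}
+#endif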
+
+/*
+ *  FCP Logout (PRLO Request / ACC) Payload Definition
+ */
+
+typedef struct _PRLO {		/* Structure is in Big Endian format */
+	uint8_t prloType;	/* FC Parm Word 0, bit 24:31 */
+
+#define PRLO_FCP_TYPE  0x08
+	uint8_t word0Reserved1;	/* FC Parm Word 0, bit 16:23 */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t word0Reserved2:2;	/* FC Parm Word 0, bit 12:13 */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t acceptRspCode:4;	/* FC Parm Word 0, bit 8:11, ACC ONLY */
+	uint8_t word0Reserved2:2;	/* FC Parm Word 0, bit 12:13 */
+	uint8_t respProcAssocV:1;	/* FC Parm Word 0, bit 14 */
+	uint8_t origProcAssocV:1;	/* FC Parm Word 0, bit 15 */
+#endif
+
+#define PRLO_REQ_EXECUTED     0x1	/* acceptRspCode */
+#define PRLO_NO_SUCH_IMAGE    0x4
+#define PRLO_INVALID_PAGE_CNT 0x7
+
+	uint8_t word0Reserved3;	/* FC Parm Word 0, bit 0:7 */
+
+	uint32_t origProcAssoc;	/* FC Parm Word 1, bit 0:31 */
+
+	uint32_t respProcAssoc;	/* FC Parm Word 2, bit 0:31 */
+
+	uint32_t word3Reserved1;	/* FC Parm Word 3, bit 0:31 */
+} PRLO;
+
+typedef struct _ADISC {		/* Structure is in Big Endian format */
+	uint32_t hardAL_PA;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	uint32_t DID;
+} ADISC;
+
+typedef struct _FARP {		/* Structure is in Big Endian format */
+	uint32_t Mflags:8;
+	uint32_t Odid:24;
+#define FARP_NO_ACTION          0	/* FARP information enclosed, no
+					   action */
+#define FARP_MATCH_PORT         0x1	/* Match on Responder Port Name */
+#define FARP_MATCH_NODE         0x2	/* Match on Responder Node Name */
+#define FARP_MATCH_IP           0x4	/* Match on IP address, not supported */
+#define FARP_MATCH_IPV4         0x5	/* Match on IPV4 address, not
+					   supported */
+#define FARP_MATCH_IPV6         0x6	/* Match on IPV6 address, not
+					   supported */
+	uint32_t Rflags:8;
+	uint32_t Rdid:24;
+#define FARP_REQUEST_PLOGI      0x1	/* Request for PLOGI */
+#define FARP_REQUEST_FARPR      0x2	/* Request for FARP Response */
+	struct lpfc_name OportName;
+	struct lpfc_name OnodeName;
+	struct lpfc_name RportName;
+	struct lpfc_name RnodeName;
+	uint8_t Oipaddr[16];
+	uint8_t Ripaddr[16];
+} FARP;
+
+typedef struct _FAN {		/* Structure is in Big Endian format */
+	uint32_t Fdid;
+	struct lpfc_name FportName;
+	struct lpfc_name FnodeName;
+} FAN;
+
+typedef struct _SCR {		/* Structure is in Big Endian format */
+	uint8_t resvd1;
+	uint8_t resvd2;
+	uint8_t resvd3;
+	uint8_t Function;
+#define  SCR_FUNC_FABRIC     0x01
+#define  SCR_FUNC_NPORT      0x02
+#define  SCR_FUNC_FULL       0x03
+#define  SCR_CLEAR           0xff
+} SCR;
+
+typedef struct _RNID_TOP_DISC {
+	struct lpfc_name portName;
+	uint8_t resvd[8];
+	uint32_t unitType;
+#define RNID_HBA            0x7
+#define RNID_HOST           0xa
+#define RNID_DRIVER         0xd
+	uint32_t physPort;
+	uint32_t attachedNodes;
+	uint16_t ipVersion;
+#define RNID_IPV4           0x1
+#define RNID_IPV6           0x2
+	uint16_t UDPport;
+	uint8_t ipAddr[16];
+	uint16_t resvd1;
+	uint16_t flags;
+#define RNID_TD_SUPPORT     0x1
+#define RNID_LP_VALID       0x2
+} RNID_TOP_DISC;
+
+typedef struct _RNID {		/* Structure is in Big Endian format */
+	uint8_t Format;
+#define RNID_TOPOLOGY_DISC  0xdf
+	uint8_t CommonLen;
+	uint8_t resvd1;
+	uint8_t SpecificLen;
+	struct lpfc_name portName;
+	struct lpfc_name nodeName;
+	union {
+		RNID_TOP_DISC topologyDisc;	/* topology disc (0xdf) */
+	} un;
+} RNID;
+
+typedef struct  _RPS {		/* Structure is in Big Endian format */
+	union {
+		uint32_t portNum;
+		struct lpfc_name portName;
+	} un;
+} RPS;
+
+typedef struct  _RPS_RSP {	/* Structure is in Big Endian format */
+	uint16_t rsvd1;
+	uint16_t portStatus;
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+} RPS_RSP;
+
+struct RLS {			/* Structure is in Big Endian format */
+	uint32_t rls;
+#define rls_rsvd_SHIFT		24
+#define rls_rsvd_MASK		0x000000ff
+#define rls_rsvd_WORD		rls
+#define rls_did_SHIFT		0
+#define rls_did_MASK		0x00ffffff
+#define rls_did_WORD		rls
+};
+
+struct  RLS_RSP {		/* Structure is in Big Endian format */
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+};
+
+struct RRQ {			/* Structure is in Big Endian format */
+	uint32_t rrq;
+#define rrq_rsvd_SHIFT		24
+#define rrq_rsvd_MASK		0x000000ff
+#define rrq_rsvd_WORD		rrq
+#define rrq_did_SHIFT		0
+#define rrq_did_MASK		0x00ffffff
+#define rrq_did_WORD		rrq
+	uint32_t rrq_exchg;
+#define rrq_oxid_SHIFT		16
+#define rrq_oxid_MASK		0xffff
+#define rrq_oxid_WORD		rrq_exchg
+#define rrq_rxid_SHIFT		0
+#define rrq_rxid_MASK		0xffff
+#define rrq_rxid_WORD		rrq_exchg
+};
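+
+/*
+ * The *_SHIFT/_MASK/_WORD triplets above (and throughout this file) follow
+ * a token-pasting bit-field convention.  A minimal sketch of matching
+ * accessors is shown below; bf_get_ex()/bf_set_ex() are hypothetical names
+ * used only for illustration.
+ */
+#if 0
+#define bf_get_ex(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_ex(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+/* e.g. extract the destination ID from an RRQ payload:	*/
+/*	uint32_t did = bf_get_ex(rrq_did, &rrq);		*/
+#endif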
+
+#define LPFC_MAX_VFN_PER_PFN	255 /* Maximum VFs allowed per ARI */
+#define LPFC_DEF_VFN_PER_PFN	0   /* Default VFs due to platform limitation*/
+
+struct RTV_RSP {		/* Structure is in Big Endian format */
+	uint32_t ratov;
+	uint32_t edtov;
+	uint32_t qtov;
+#define qtov_rsvd0_SHIFT	28
+#define qtov_rsvd0_MASK		0x0000000f
+#define qtov_rsvd0_WORD		qtov		/* reserved */
+#define qtov_edtovres_SHIFT	27
+#define qtov_edtovres_MASK	0x00000001
+#define qtov_edtovres_WORD	qtov		/* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT	19
+#define qtov_rsvd1_MASK		0x0000003f
+#define qtov_rsvd1_WORD		qtov		/* reserved */
+#define qtov_rttov_SHIFT	18
+#define qtov_rttov_MASK		0x00000001
+#define qtov_rttov_WORD		qtov		/* R_T_TOV value */
+#define qtov_rsvd2_SHIFT	0
+#define qtov_rsvd2_MASK		0x0003ffff
+#define qtov_rsvd2_WORD		qtov		/* reserved */
+};
+
+
+typedef struct  _RPL {		/* Structure is in Big Endian format */
+	uint32_t maxsize;
+	uint32_t index;
+} RPL;
+
+typedef struct  _PORT_NUM_BLK {
+	uint32_t portNum;
+	uint32_t portID;
+	struct lpfc_name portName;
+} PORT_NUM_BLK;
+
+typedef struct  _RPL_RSP {	/* Structure is in Big Endian format */
+	uint32_t listLen;
+	uint32_t index;
+	PORT_NUM_BLK port_num_blk;
+} RPL_RSP;
+
+/* This is used for RSCN command */
+typedef struct _D_ID {		/* Structure is in Big Endian format */
+	union {
+		uint32_t word;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t resv;
+			uint8_t domain;
+			uint8_t area;
+			uint8_t id;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint8_t id;
+			uint8_t area;
+			uint8_t domain;
+			uint8_t resv;
+#endif
+		} b;
+	} un;
+} D_ID;
+
+#define RSCN_ADDRESS_FORMAT_PORT	0x0
+#define RSCN_ADDRESS_FORMAT_AREA	0x1
+#define RSCN_ADDRESS_FORMAT_DOMAIN	0x2
+#define RSCN_ADDRESS_FORMAT_FABRIC	0x3
+#define RSCN_ADDRESS_FORMAT_MASK	0x3
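+
+/*
+ * Illustrative sketch only: classifying one RSCN page, assuming it has
+ * already been byte-swapped into the D_ID layout above (the address
+ * format lives in the low bits of the reserved byte).  The helper name
+ * is hypothetical.
+ */
+#if 0
+static int example_rscn_is_fabric_wide(D_ID *rscn_did)
+{
+	return (rscn_did->un.b.resv & RSCN_ADDRESS_FORMAT_MASK) ==
+		RSCN_ADDRESS_FORMAT_FABRIC;
+}
+#endif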
+
+/*
+ *  Structure to define all ELS Payload types
+ */
+
+typedef struct _ELS_PKT {	/* Structure is in Big Endian format */
+	uint8_t elsCode;	/* FC Word 0, bit 24:31 */
+	uint8_t elsByte1;
+	uint8_t elsByte2;
+	uint8_t elsByte3;
+	union {
+		struct ls_rjt lsRjt;	/* Payload for LS_RJT ELS response */
+		struct serv_parm logi;	/* Payload for PLOGI/FLOGI/PDISC/ACC */
+		LOGO logo;	/* Payload for PLOGO/FLOGO/ACC */
+		PRLI prli;	/* Payload for PRLI/ACC */
+		PRLO prlo;	/* Payload for PRLO/ACC */
+		ADISC adisc;	/* Payload for ADISC/ACC */
+		FARP farp;	/* Payload for FARP/ACC */
+		FAN fan;	/* Payload for FAN */
+		SCR scr;	/* Payload for SCR/ACC */
+		RNID rnid;	/* Payload for RNID */
+		uint8_t pad[128 - 4];	/* Pad out to payload of 128 bytes */
+	} un;
+} ELS_PKT;
+
+/*
+ * FDMI
+ * HBA Management Operations Command Codes
+ */
+#define  SLI_MGMT_GRHL     0x100	/* Get registered HBA list */
+#define  SLI_MGMT_GHAT     0x101	/* Get HBA attributes */
+#define  SLI_MGMT_GRPL     0x102	/* Get registered Port list */
+#define  SLI_MGMT_GPAT     0x110	/* Get Port attributes */
+#define  SLI_MGMT_RHBA     0x200	/* Register HBA */
+#define  SLI_MGMT_RHAT     0x201	/* Register HBA attributes */
+#define  SLI_MGMT_RPRT     0x210	/* Register Port */
+#define  SLI_MGMT_RPA      0x211	/* Register Port attributes */
+#define  SLI_MGMT_DHBA     0x300	/* De-register HBA */
+#define  SLI_MGMT_DPRT     0x310	/* De-register Port */
+
+/*
+ * Management Service Subtypes
+ */
+#define  SLI_CT_FDMI_Subtypes     0x10
+
+/*
+ * HBA Management Service Reject Code
+ */
+#define  REJECT_CODE             0x9	/* Unable to perform command request */
+
+/*
+ * HBA Management Service Reject Reason Code
+ * Please refer to the Reason Codes above
+ */
+
+/*
+ * HBA Attribute Types
+ */
+#define  NODE_NAME               0x1
+#define  MANUFACTURER            0x2
+#define  SERIAL_NUMBER           0x3
+#define  MODEL                   0x4
+#define  MODEL_DESCRIPTION       0x5
+#define  HARDWARE_VERSION        0x6
+#define  DRIVER_VERSION          0x7
+#define  OPTION_ROM_VERSION      0x8
+#define  FIRMWARE_VERSION        0x9
+#define  OS_NAME_VERSION	 0xa
+#define  MAX_CT_PAYLOAD_LEN	 0xb
+
+/*
+ * Port Attribute Types
+ */
+#define  SUPPORTED_FC4_TYPES     0x1
+#define  SUPPORTED_SPEED         0x2
+#define  PORT_SPEED              0x3
+#define  MAX_FRAME_SIZE          0x4
+#define  OS_DEVICE_NAME          0x5
+#define  HOST_NAME               0x6
+
+union AttributesDef {
+	/* Structure is in Big Endian format */
+	struct {
+		uint32_t AttrType:16;
+		uint32_t AttrLen:16;
+	} bits;
+	uint32_t word;
+};
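+
+/*
+ * Illustrative sketch only: each FDMI attribute entry begins with this
+ * header word; the entry length conventionally covers the 4-byte header
+ * plus the value padded to a 4-byte multiple.  The helper name is
+ * hypothetical, and wire-order byte swapping is left out.
+ */
+#if 0
+static void example_fdmi_attr_hdr(union AttributesDef *ad,
+				  uint16_t type, uint16_t value_len)
+{
+	ad->bits.AttrType = type;
+	ad->bits.AttrLen = 4 + ((value_len + 3) & ~3);
+}
+#endif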
+
+
+/*
+ * HBA Attribute Entry (8 - 260 bytes)
+ */
+typedef struct {
+	union AttributesDef ad;
+	union {
+		uint32_t VendorSpecific;
+		uint8_t Manufacturer[64];
+		uint8_t SerialNumber[64];
+		uint8_t Model[256];
+		uint8_t ModelDescription[256];
+		uint8_t HardwareVersion[256];
+		uint8_t DriverVersion[256];
+		uint8_t OptionROMVersion[256];
+		uint8_t FirmwareVersion[256];
+		struct lpfc_name NodeName;
+		uint8_t SupportFC4Types[32];
+		uint32_t SupportSpeed;
+		uint32_t PortSpeed;
+		uint32_t MaxFrameSize;
+		uint8_t OsDeviceName[256];
+		uint8_t OsNameVersion[256];
+		uint32_t MaxCTPayloadLen;
+		uint8_t HostName[256];
+	} un;
+} ATTRIBUTE_ENTRY;
+
+/*
+ * HBA Attribute Block
+ */
+typedef struct {
+	uint32_t EntryCnt;	/* Number of HBA attribute entries */
+	ATTRIBUTE_ENTRY Entry;	/* Variable-length array */
+} ATTRIBUTE_BLOCK;
+
+/*
+ * Port Entry
+ */
+typedef struct {
+	struct lpfc_name PortName;
+} PORT_ENTRY;
+
+/*
+ * HBA Identifier
+ */
+typedef struct {
+	struct lpfc_name PortName;
+} HBA_IDENTIFIER;
+
+/*
+ * Registered Port List Format
+ */
+typedef struct {
+	uint32_t EntryCnt;
+	PORT_ENTRY pe;		/* Variable-length array */
+} REG_PORT_LIST;
+
+/*
+ * Register HBA(RHBA)
+ */
+typedef struct {
+	HBA_IDENTIFIER hi;
+	REG_PORT_LIST rpl;	/* variable-length array */
+/* ATTRIBUTE_BLOCK   ab; */
+} REG_HBA;
+
+/*
+ * Register HBA Attributes (RHAT)
+ */
+typedef struct {
+	struct lpfc_name HBA_PortName;
+	ATTRIBUTE_BLOCK ab;
+} REG_HBA_ATTRIBUTE;
+
+/*
+ * Register Port Attributes (RPA)
+ */
+typedef struct {
+	struct lpfc_name PortName;
+	ATTRIBUTE_BLOCK ab;
+} REG_PORT_ATTRIBUTE;
+
+/*
+ * Get Registered HBA List (GRHL) Accept Payload Format
+ */
+typedef struct {
+	uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
+	struct lpfc_name HBA_PortName;	/* Variable-length array */
+} GRHL_ACC_PAYLOAD;
+
+/*
+ * Get Registered Port List (GRPL) Accept Payload Format
+ */
+typedef struct {
+	uint32_t RPL_Entry_Cnt;	/* Number of Registered Port Entries */
+	PORT_ENTRY Reg_Port_Entry[1];	/* Variable-length array */
+} GRPL_ACC_PAYLOAD;
+
+/*
+ * Get Port Attributes (GPAT) Accept Payload Format
+ */
+
+typedef struct {
+	ATTRIBUTE_BLOCK pab;
+} GPAT_ACC_PAYLOAD;
+
+
+/*
+ *  Begin HBA configuration parameters.
+ *  The PCI configuration register BAR assignments are:
+ *  BAR0, offset 0x10 - SLIM base memory address
+ *  BAR1, offset 0x14 - SLIM base memory high address
+ *  BAR2, offset 0x18 - REGISTER base memory address
+ *  BAR3, offset 0x1c - REGISTER base memory high address
+ *  BAR4, offset 0x20 - BIU I/O registers
+ *  BAR5, offset 0x24 - REGISTER base io high address
+ */
+
+/* Number of rings currently used and available. */
+#define MAX_CONFIGURED_RINGS     3
+#define MAX_RINGS                4
+
+/* IOCB / Mailbox is owned by FireFly */
+#define OWN_CHIP        1
+
+/* IOCB / Mailbox is owned by Host */
+#define OWN_HOST        0
+
+/* Number of 4-byte words in an IOCB. */
+#define IOCB_WORD_SZ    8
+
+/* network headers for Dfctl field */
+#define FC_NET_HDR      0x20
+
+/* Start FireFly Register definitions */
+#define PCI_VENDOR_ID_EMULEX        0x10df
+#define PCI_DEVICE_ID_FIREFLY       0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF    0xe100
+#define PCI_DEVICE_ID_BALIUS        0xe131
+#define PCI_DEVICE_ID_PROTEUS_PF    0xe180
+#define PCI_DEVICE_ID_LANCER_FC     0xe200
+#define PCI_DEVICE_ID_LANCER_FC_VF  0xe208
+#define PCI_DEVICE_ID_LANCER_FCOE   0xe260
+#define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
+#define PCI_DEVICE_ID_SAT_SMB       0xf011
+#define PCI_DEVICE_ID_SAT_MID       0xf015
+#define PCI_DEVICE_ID_RFLY          0xf095
+#define PCI_DEVICE_ID_PFLY          0xf098
+#define PCI_DEVICE_ID_LP101         0xf0a1
+#define PCI_DEVICE_ID_TFLY          0xf0a5
+#define PCI_DEVICE_ID_BSMB          0xf0d1
+#define PCI_DEVICE_ID_BMID          0xf0d5
+#define PCI_DEVICE_ID_ZSMB          0xf0e1
+#define PCI_DEVICE_ID_ZMID          0xf0e5
+#define PCI_DEVICE_ID_NEPTUNE       0xf0f5
+#define PCI_DEVICE_ID_NEPTUNE_SCSP  0xf0f6
+#define PCI_DEVICE_ID_NEPTUNE_DCSP  0xf0f7
+#define PCI_DEVICE_ID_SAT           0xf100
+#define PCI_DEVICE_ID_SAT_SCSP      0xf111
+#define PCI_DEVICE_ID_SAT_DCSP      0xf112
+#define PCI_DEVICE_ID_FALCON        0xf180
+#define PCI_DEVICE_ID_SUPERFLY      0xf700
+#define PCI_DEVICE_ID_DRAGONFLY     0xf800
+#define PCI_DEVICE_ID_CENTAUR       0xf900
+#define PCI_DEVICE_ID_PEGASUS       0xf980
+#define PCI_DEVICE_ID_THOR          0xfa00
+#define PCI_DEVICE_ID_VIPER         0xfb00
+#define PCI_DEVICE_ID_LP10000S      0xfc00
+#define PCI_DEVICE_ID_LP11000S      0xfc10
+#define PCI_DEVICE_ID_LPE11000S     0xfc20
+#define PCI_DEVICE_ID_SAT_S         0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S     0xfc50
+#define PCI_DEVICE_ID_HELIOS        0xfd00
+#define PCI_DEVICE_ID_HELIOS_SCSP   0xfd11
+#define PCI_DEVICE_ID_HELIOS_DCSP   0xfd12
+#define PCI_DEVICE_ID_ZEPHYR        0xfe00
+#define PCI_DEVICE_ID_HORNET        0xfe05
+#define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
+#define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE  0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TOMCAT        0x0714
+
+#define JEDEC_ID_ADDRESS            0x0080001c
+#define FIREFLY_JEDEC_ID            0x1ACC
+#define SUPERFLY_JEDEC_ID           0x0020
+#define DRAGONFLY_JEDEC_ID          0x0021
+#define DRAGONFLY_V2_JEDEC_ID       0x0025
+#define CENTAUR_2G_JEDEC_ID         0x0026
+#define CENTAUR_1G_JEDEC_ID         0x0028
+#define PEGASUS_ORION_JEDEC_ID      0x0036
+#define PEGASUS_JEDEC_ID            0x0038
+#define THOR_JEDEC_ID               0x0012
+#define HELIOS_JEDEC_ID             0x0364
+#define ZEPHYR_JEDEC_ID             0x0577
+#define VIPER_JEDEC_ID              0x4838
+#define SATURN_JEDEC_ID             0x1004
+#define HORNET_JDEC_ID              0x2057706D
+
+#define JEDEC_ID_MASK               0x0FFFF000
+#define JEDEC_ID_SHIFT              12
+#define FC_JEDEC_ID(id)             ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
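+
+/*
+ * Illustrative sketch only: FC_JEDEC_ID() strips the surrounding fields so
+ * the value read from JEDEC_ID_ADDRESS can be compared directly against
+ * the *_JEDEC_ID constants above.  The helper name is hypothetical.
+ */
+#if 0
+static int example_is_helios(uint32_t raw_jedec_reg)
+{
+	return FC_JEDEC_ID(raw_jedec_reg) == HELIOS_JEDEC_ID;
+}
+#endif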
+
+typedef struct {		/* FireFly BIU registers */
+	uint32_t hostAtt;	/* See definitions for Host Attention
+				   register */
+	uint32_t chipAtt;	/* See definitions for Chip Attention
+				   register */
+	uint32_t hostStatus;	/* See definitions for Host Status register */
+	uint32_t hostControl;	/* See definitions for Host Control register */
+	uint32_t buiConfig;	/* See definitions for BIU configuration
+				   register */
+} FF_REGS;
+
+/* IO Register size in bytes */
+#define FF_REG_AREA_SIZE       256
+
+/* Host Attention Register */
+
+#define HA_REG_OFFSET  0	/* Byte offset from register base address */
+
+#define HA_R0RE_REQ    0x00000001	/* Bit  0 */
+#define HA_R0CE_RSP    0x00000002	/* Bit  1 */
+#define HA_R0ATT       0x00000008	/* Bit  3 */
+#define HA_R1RE_REQ    0x00000010	/* Bit  4 */
+#define HA_R1CE_RSP    0x00000020	/* Bit  5 */
+#define HA_R1ATT       0x00000080	/* Bit  7 */
+#define HA_R2RE_REQ    0x00000100	/* Bit  8 */
+#define HA_R2CE_RSP    0x00000200	/* Bit  9 */
+#define HA_R2ATT       0x00000800	/* Bit 11 */
+#define HA_R3RE_REQ    0x00001000	/* Bit 12 */
+#define HA_R3CE_RSP    0x00002000	/* Bit 13 */
+#define HA_R3ATT       0x00008000	/* Bit 15 */
+#define HA_LATT        0x20000000	/* Bit 29 */
+#define HA_MBATT       0x40000000	/* Bit 30 */
+#define HA_ERATT       0x80000000	/* Bit 31 */
+
+#define HA_RXRE_REQ    0x00000001	/* Bit  0 */
+#define HA_RXCE_RSP    0x00000002	/* Bit  1 */
+#define HA_RXATT       0x00000008	/* Bit  3 */
+#define HA_RXMASK      0x0000000f
+
+#define HA_R0_CLR_MSK	(HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
+#define HA_R1_CLR_MSK	(HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
+#define HA_R2_CLR_MSK	(HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
+#define HA_R3_CLR_MSK	(HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
+
+#define HA_R0_POS	3
+#define HA_R1_POS	7
+#define HA_R2_POS	11
+#define HA_R3_POS	15
+#define HA_LE_POS	29
+#define HA_MB_POS	30
+#define HA_ER_POS	31
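+
+/*
+ * Illustrative sketch only: the per-ring request/response/attention bits
+ * repeat every four bits of the Host Attention register, so ring N's bits
+ * can be isolated with HA_RXMASK as shown.  The helper name is
+ * hypothetical.
+ */
+#if 0
+static uint32_t example_ring_attention(uint32_t ha_copy, unsigned int ring)
+{
+	return (ha_copy >> (ring * 4)) & HA_RXMASK;
+}
+#endif
+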
+/* Chip Attention Register */
+
+#define CA_REG_OFFSET  4	/* Byte offset from register base address */
+
+#define CA_R0CE_REQ    0x00000001	/* Bit  0 */
+#define CA_R0RE_RSP    0x00000002	/* Bit  1 */
+#define CA_R0ATT       0x00000008	/* Bit  3 */
+#define CA_R1CE_REQ    0x00000010	/* Bit  4 */
+#define CA_R1RE_RSP    0x00000020	/* Bit  5 */
+#define CA_R1ATT       0x00000080	/* Bit  7 */
+#define CA_R2CE_REQ    0x00000100	/* Bit  8 */
+#define CA_R2RE_RSP    0x00000200	/* Bit  9 */
+#define CA_R2ATT       0x00000800	/* Bit 11 */
+#define CA_R3CE_REQ    0x00001000	/* Bit 12 */
+#define CA_R3RE_RSP    0x00002000	/* Bit 13 */
+#define CA_R3ATT       0x00008000	/* Bit 15 */
+#define CA_MBATT       0x40000000	/* Bit 30 */
+
+/* Host Status Register */
+
+#define HS_REG_OFFSET  8	/* Byte offset from register base address */
+
+#define HS_MBRDY       0x00400000	/* Bit 22 */
+#define HS_FFRDY       0x00800000	/* Bit 23 */
+#define HS_FFER8       0x01000000	/* Bit 24 */
+#define HS_FFER7       0x02000000	/* Bit 25 */
+#define HS_FFER6       0x04000000	/* Bit 26 */
+#define HS_FFER5       0x08000000	/* Bit 27 */
+#define HS_FFER4       0x10000000	/* Bit 28 */
+#define HS_FFER3       0x20000000	/* Bit 29 */
+#define HS_FFER2       0x40000000	/* Bit 30 */
+#define HS_FFER1       0x80000000	/* Bit 31 */
+#define HS_CRIT_TEMP   0x00000100	/* Bit 8  */
+#define HS_FFERM       0xFF000100	/* Mask for error bits 31:24 and 8 */
+#define UNPLUG_ERR     0x00000001	/* Indicate pci hot unplug */
+/* Host Control Register */
+
+#define HC_REG_OFFSET  12	/* Byte offset from register base address */
+
+#define HC_MBINT_ENA   0x00000001	/* Bit  0 */
+#define HC_R0INT_ENA   0x00000002	/* Bit  1 */
+#define HC_R1INT_ENA   0x00000004	/* Bit  2 */
+#define HC_R2INT_ENA   0x00000008	/* Bit  3 */
+#define HC_R3INT_ENA   0x00000010	/* Bit  4 */
+#define HC_INITHBI     0x02000000	/* Bit 25 */
+#define HC_INITMB      0x04000000	/* Bit 26 */
+#define HC_INITFF      0x08000000	/* Bit 27 */
+#define HC_LAINT_ENA   0x20000000	/* Bit 29 */
+#define HC_ERINT_ENA   0x80000000	/* Bit 31 */
+
+/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
+#define MSIX_DFLT_ID	0
+#define MSIX_RNG0_ID	0
+#define MSIX_RNG1_ID	1
+#define MSIX_RNG2_ID	2
+#define MSIX_RNG3_ID	3
+
+#define MSIX_LINK_ID	4
+#define MSIX_MBOX_ID	5
+
+#define MSIX_SPARE0_ID	6
+#define MSIX_SPARE1_ID	7
+
+/* Mailbox Commands */
+#define MBX_SHUTDOWN        0x00	/* terminate testing */
+#define MBX_LOAD_SM         0x01
+#define MBX_READ_NV         0x02
+#define MBX_WRITE_NV        0x03
+#define MBX_RUN_BIU_DIAG    0x04
+#define MBX_INIT_LINK       0x05
+#define MBX_DOWN_LINK       0x06
+#define MBX_CONFIG_LINK     0x07
+#define MBX_CONFIG_RING     0x09
+#define MBX_RESET_RING      0x0A
+#define MBX_READ_CONFIG     0x0B
+#define MBX_READ_RCONFIG    0x0C
+#define MBX_READ_SPARM      0x0D
+#define MBX_READ_STATUS     0x0E
+#define MBX_READ_RPI        0x0F
+#define MBX_READ_XRI        0x10
+#define MBX_READ_REV        0x11
+#define MBX_READ_LNK_STAT   0x12
+#define MBX_REG_LOGIN       0x13
+#define MBX_UNREG_LOGIN     0x14
+#define MBX_CLEAR_LA        0x16
+#define MBX_DUMP_MEMORY     0x17
+#define MBX_DUMP_CONTEXT    0x18
+#define MBX_RUN_DIAGS       0x19
+#define MBX_RESTART         0x1A
+#define MBX_UPDATE_CFG      0x1B
+#define MBX_DOWN_LOAD       0x1C
+#define MBX_DEL_LD_ENTRY    0x1D
+#define MBX_RUN_PROGRAM     0x1E
+#define MBX_SET_MASK        0x20
+#define MBX_SET_VARIABLE    0x21
+#define MBX_UNREG_D_ID      0x23
+#define MBX_KILL_BOARD      0x24
+#define MBX_CONFIG_FARP     0x25
+#define MBX_BEACON          0x2A
+#define MBX_CONFIG_MSI      0x30
+#define MBX_HEARTBEAT       0x31
+#define MBX_WRITE_VPARMS    0x32
+#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG  0x38
+#define MBX_WRITE_EVENT_LOG 0x39
+
+#define MBX_PORT_CAPABILITIES 0x3B
+#define MBX_PORT_IOV_CONTROL 0x3C
+
+#define MBX_CONFIG_HBQ	    0x7C
+#define MBX_LOAD_AREA       0x81
+#define MBX_RUN_BIU_DIAG64  0x84
+#define MBX_CONFIG_PORT     0x88
+#define MBX_READ_SPARM64    0x8D
+#define MBX_READ_RPI64      0x8F
+#define MBX_REG_LOGIN64     0x93
+#define MBX_READ_TOPOLOGY   0x95
+#define MBX_REG_VPI	    0x96
+#define MBX_UNREG_VPI	    0x97
+
+#define MBX_WRITE_WWN       0x98
+#define MBX_SET_DEBUG       0x99
+#define MBX_LOAD_EXP_ROM    0x9C
+#define MBX_SLI4_CONFIG	    0x9B
+#define MBX_SLI4_REQ_FTRS   0x9D
+#define MBX_MAX_CMDS        0x9E
+#define MBX_RESUME_RPI      0x9E
+#define MBX_SLI2_CMD_MASK   0x80
+#define MBX_REG_VFI         0x9F
+#define MBX_REG_FCFI        0xA0
+#define MBX_UNREG_VFI       0xA1
+#define MBX_UNREG_FCFI	    0xA2
+#define MBX_INIT_VFI        0xA3
+#define MBX_INIT_VPI        0xA4
+
+#define MBX_AUTH_PORT       0xF8
+#define MBX_SECURITY_MGMT   0xF9
+
+/* IOCB Commands */
+
+#define CMD_RCV_SEQUENCE_CX     0x01
+#define CMD_XMIT_SEQUENCE_CR    0x02
+#define CMD_XMIT_SEQUENCE_CX    0x03
+#define CMD_XMIT_BCAST_CN       0x04
+#define CMD_XMIT_BCAST_CX       0x05
+#define CMD_QUE_RING_BUF_CN     0x06
+#define CMD_QUE_XRI_BUF_CX      0x07
+#define CMD_IOCB_CONTINUE_CN    0x08
+#define CMD_RET_XRI_BUF_CX      0x09
+#define CMD_ELS_REQUEST_CR      0x0A
+#define CMD_ELS_REQUEST_CX      0x0B
+#define CMD_RCV_ELS_REQ_CX      0x0D
+#define CMD_ABORT_XRI_CN        0x0E
+#define CMD_ABORT_XRI_CX        0x0F
+#define CMD_CLOSE_XRI_CN        0x10
+#define CMD_CLOSE_XRI_CX        0x11
+#define CMD_CREATE_XRI_CR       0x12
+#define CMD_CREATE_XRI_CX       0x13
+#define CMD_GET_RPI_CN          0x14
+#define CMD_XMIT_ELS_RSP_CX     0x15
+#define CMD_GET_RPI_CR          0x16
+#define CMD_XRI_ABORTED_CX      0x17
+#define CMD_FCP_IWRITE_CR       0x18
+#define CMD_FCP_IWRITE_CX       0x19
+#define CMD_FCP_IREAD_CR        0x1A
+#define CMD_FCP_IREAD_CX        0x1B
+#define CMD_FCP_ICMND_CR        0x1C
+#define CMD_FCP_ICMND_CX        0x1D
+#define CMD_FCP_TSEND_CX        0x1F
+#define CMD_FCP_TRECEIVE_CX     0x21
+#define CMD_FCP_TRSP_CX	        0x23
+#define CMD_FCP_AUTO_TRSP_CX    0x29
+
+#define CMD_ADAPTER_MSG         0x20
+#define CMD_ADAPTER_DUMP        0x22
+
+/*  SLI_2 IOCB Command Set */
+
+#define CMD_ASYNC_STATUS        0x7C
+#define CMD_RCV_SEQUENCE64_CX   0x81
+#define CMD_XMIT_SEQUENCE64_CR  0x82
+#define CMD_XMIT_SEQUENCE64_CX  0x83
+#define CMD_XMIT_BCAST64_CN     0x84
+#define CMD_XMIT_BCAST64_CX     0x85
+#define CMD_QUE_RING_BUF64_CN   0x86
+#define CMD_QUE_XRI_BUF64_CX    0x87
+#define CMD_IOCB_CONTINUE64_CN  0x88
+#define CMD_RET_XRI_BUF64_CX    0x89
+#define CMD_ELS_REQUEST64_CR    0x8A
+#define CMD_ELS_REQUEST64_CX    0x8B
+#define CMD_ABORT_MXRI64_CN     0x8C
+#define CMD_RCV_ELS_REQ64_CX    0x8D
+#define CMD_XMIT_ELS_RSP64_CX   0x95
+#define CMD_XMIT_BLS_RSP64_CX   0x97
+#define CMD_FCP_IWRITE64_CR     0x98
+#define CMD_FCP_IWRITE64_CX     0x99
+#define CMD_FCP_IREAD64_CR      0x9A
+#define CMD_FCP_IREAD64_CX      0x9B
+#define CMD_FCP_ICMND64_CR      0x9C
+#define CMD_FCP_ICMND64_CX      0x9D
+#define CMD_FCP_TSEND64_CX      0x9F
+#define CMD_FCP_TRECEIVE64_CX   0xA1
+#define CMD_FCP_TRSP64_CX       0xA3
+
+#define CMD_QUE_XRI64_CX	0xB3
+#define CMD_IOCB_RCV_SEQ64_CX	0xB5
+#define CMD_IOCB_RCV_ELS64_CX	0xB7
+#define CMD_IOCB_RET_XRI64_CX	0xB9
+#define CMD_IOCB_RCV_CONT64_CX	0xBB
+
+#define CMD_GEN_REQUEST64_CR    0xC2
+#define CMD_GEN_REQUEST64_CX    0xC3
+
+/* Unhandled SLI-3 Commands */
+#define CMD_IOCB_XMIT_MSEQ64_CR		0xB0
+#define CMD_IOCB_XMIT_MSEQ64_CX		0xB1
+#define CMD_IOCB_RCV_SEQ_LIST64_CX	0xC1
+#define CMD_IOCB_RCV_ELS_LIST64_CX	0xCD
+#define CMD_IOCB_CLOSE_EXTENDED_CN	0xB6
+#define CMD_IOCB_ABORT_EXTENDED_CN	0xBA
+#define CMD_IOCB_RET_HBQE64_CN		0xCA
+#define CMD_IOCB_FCP_IBIDIR64_CR	0xAC
+#define CMD_IOCB_FCP_IBIDIR64_CX	0xAD
+#define CMD_IOCB_FCP_ITASKMGT64_CX	0xAF
+#define CMD_IOCB_LOGENTRY_CN		0x94
+#define CMD_IOCB_LOGENTRY_ASYNC_CN	0x96
+
+/* Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR		0xF8
+#define DSSCMD_IWRITE64_CX		0xF9
+#define DSSCMD_IREAD64_CR		0xFA
+#define DSSCMD_IREAD64_CX		0xFB
+
+#define CMD_MAX_IOCB_CMD        0xFB
+#define CMD_IOCB_MASK           0xff
+
+#define MAX_MSG_DATA            28	/* max msg data in CMD_ADAPTER_MSG
+					   iocb */
+#define LPFC_MAX_ADPTMSG         32	/* max msg data */
+/*
+ *  Define Status
+ */
+#define MBX_SUCCESS                 0
+#define MBXERR_NUM_RINGS            1
+#define MBXERR_NUM_IOCBS            2
+#define MBXERR_IOCBS_EXCEEDED       3
+#define MBXERR_BAD_RING_NUMBER      4
+#define MBXERR_MASK_ENTRIES_RANGE   5
+#define MBXERR_MASKS_EXCEEDED       6
+#define MBXERR_BAD_PROFILE          7
+#define MBXERR_BAD_DEF_CLASS        8
+#define MBXERR_BAD_MAX_RESPONDER    9
+#define MBXERR_BAD_MAX_ORIGINATOR   10
+#define MBXERR_RPI_REGISTERED       11
+#define MBXERR_RPI_FULL             12
+#define MBXERR_NO_RESOURCES         13
+#define MBXERR_BAD_RCV_LENGTH       14
+#define MBXERR_DMA_ERROR            15
+#define MBXERR_ERROR                16
+#define MBXERR_LINK_DOWN            0x33
+#define MBXERR_SEC_NO_PERMISSION    0xF02
+#define MBX_NOT_FINISHED            255
+
+#define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
+#define MBX_TIMEOUT                0xfffffe /* time-out expired waiting for cmd */
+
+#define TEMPERATURE_OFFSET 0xB0	/* Slim offset for critical temperature event */
+
+/*
+ *    Begin Structure Definitions for Mailbox Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t tval;
+	uint8_t tmask;
+	uint8_t rval;
+	uint8_t rmask;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t rmask;
+	uint8_t rval;
+	uint8_t tmask;
+	uint8_t tval;
+#endif
+} RR_REG;
+
+struct ulp_bde {
+	uint32_t bdeAddress;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t bdeReserved:4;
+	uint32_t bdeAddrHigh:4;
+	uint32_t bdeSize:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t bdeSize:24;
+	uint32_t bdeAddrHigh:4;
+	uint32_t bdeReserved:4;
+#endif
+};
+
+typedef struct ULP_BDL {	/* SLI-2 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t bdeFlags:8;	/* BDL Flags */
+	uint32_t bdeSize:24;	/* Size of BDL array in host memory (bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t bdeSize:24;	/* Size of BDL array in host memory (bytes) */
+	uint32_t bdeFlags:8;	/* BDL Flags */
+#endif
+
+	uint32_t addrLow;	/* Address 0:31 */
+	uint32_t addrHigh;	/* Address 32:63 */
+	uint32_t ulpIoTag32;	/* Can be used for 32 bit I/O Tag */
+} ULP_BDL;
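+
+/*
+ * Illustrative sketch only: a 64-bit DMA address is carried in the BDL as
+ * two 32-bit halves (the driver's own helpers elsewhere do this split).
+ * The helper name here is hypothetical; the flag value to use depends on
+ * the BDE type, so it is not shown.
+ */
+#if 0
+static void example_fill_bdl(ULP_BDL *bdl, uint64_t dma_addr, uint32_t bytes)
+{
+	bdl->addrLow  = (uint32_t)(dma_addr & 0xffffffffULL);
+	bdl->addrHigh = (uint32_t)(dma_addr >> 32);
+	bdl->bdeSize  = bytes;		/* size of the BDE array, in bytes */
+}
+#endif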
+
+/*
+ * BlockGuard Definitions
+ */
+
+enum lpfc_protgrp_type {
+	LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors                  */
+	LPFC_PG_TYPE_NO_DIF,	  /* no DIF data pointed to by prot grp       */
+	LPFC_PG_TYPE_EMBD_DIF,	  /* DIF is embedded (inline) with data       */
+	LPFC_PG_TYPE_DIF_BUF	  /* DIF has its own scatter/gather list      */
+};
+
+/* PDE Descriptors */
+#define LPFC_PDE5_DESCRIPTOR		0x85
+#define LPFC_PDE6_DESCRIPTOR		0x86
+#define LPFC_PDE7_DESCRIPTOR		0x87
+
+/* BlockGuard Opcodes */
+#define BG_OP_IN_NODIF_OUT_CRC		0x0
+#define	BG_OP_IN_CRC_OUT_NODIF		0x1
+#define	BG_OP_IN_NODIF_OUT_CSUM		0x2
+#define	BG_OP_IN_CSUM_OUT_NODIF		0x3
+#define	BG_OP_IN_CRC_OUT_CRC		0x4
+#define	BG_OP_IN_CSUM_OUT_CSUM		0x5
+#define	BG_OP_IN_CRC_OUT_CSUM		0x6
+#define	BG_OP_IN_CSUM_OUT_CRC		0x7
+
+struct lpfc_pde5 {
+	uint32_t word0;
+#define pde5_type_SHIFT		24
+#define pde5_type_MASK		0x000000ff
+#define pde5_type_WORD		word0
+#define pde5_rsvd0_SHIFT	0
+#define pde5_rsvd0_MASK		0x00ffffff
+#define pde5_rsvd0_WORD		word0
+	uint32_t reftag;	/* Reference Tag Value			*/
+	uint32_t reftagtr;	/* Reference Tag Translation Value 	*/
+};
+
+struct lpfc_pde6 {
+	uint32_t word0;
+#define pde6_type_SHIFT		24
+#define pde6_type_MASK		0x000000ff
+#define pde6_type_WORD		word0
+#define pde6_rsvd0_SHIFT	0
+#define pde6_rsvd0_MASK		0x00ffffff
+#define pde6_rsvd0_WORD		word0
+	uint32_t word1;
+#define pde6_rsvd1_SHIFT	26
+#define pde6_rsvd1_MASK		0x0000003f
+#define pde6_rsvd1_WORD		word1
+#define pde6_na_SHIFT		25
+#define pde6_na_MASK		0x00000001
+#define pde6_na_WORD		word1
+#define pde6_rsvd2_SHIFT	16
+#define pde6_rsvd2_MASK		0x000001FF
+#define pde6_rsvd2_WORD		word1
+#define pde6_apptagtr_SHIFT	0
+#define pde6_apptagtr_MASK	0x0000ffff
+#define pde6_apptagtr_WORD	word1
+	uint32_t word2;
+#define pde6_optx_SHIFT		28
+#define pde6_optx_MASK		0x0000000f
+#define pde6_optx_WORD		word2
+#define pde6_oprx_SHIFT		24
+#define pde6_oprx_MASK		0x0000000f
+#define pde6_oprx_WORD		word2
+#define pde6_nr_SHIFT		23
+#define pde6_nr_MASK		0x00000001
+#define pde6_nr_WORD		word2
+#define pde6_ce_SHIFT		22
+#define pde6_ce_MASK		0x00000001
+#define pde6_ce_WORD		word2
+#define pde6_re_SHIFT		21
+#define pde6_re_MASK		0x00000001
+#define pde6_re_WORD		word2
+#define pde6_ae_SHIFT		20
+#define pde6_ae_MASK		0x00000001
+#define pde6_ae_WORD		word2
+#define pde6_ai_SHIFT		19
+#define pde6_ai_MASK		0x00000001
+#define pde6_ai_WORD		word2
+#define pde6_bs_SHIFT		16
+#define pde6_bs_MASK		0x00000007
+#define pde6_bs_WORD		word2
+#define pde6_apptagval_SHIFT	0
+#define pde6_apptagval_MASK	0x0000ffff
+#define pde6_apptagval_WORD	word2
+};
+
+struct lpfc_pde7 {
+	uint32_t word0;
+#define pde7_type_SHIFT		24
+#define pde7_type_MASK		0x000000ff
+#define pde7_type_WORD		word0
+#define pde7_rsvd0_SHIFT	0
+#define pde7_rsvd0_MASK		0x00ffffff
+#define pde7_rsvd0_WORD		word0
+	uint32_t addrHigh;
+	uint32_t addrLow;
+};
+
+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:25;
+	uint32_t acknowledgment:1;
+	uint32_t version:1;
+	uint32_t erase_or_prog:1;
+	uint32_t update_flash:1;
+	uint32_t update_ram:1;
+	uint32_t method:1;
+	uint32_t load_cmplt:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t load_cmplt:1;
+	uint32_t method:1;
+	uint32_t update_ram:1;
+	uint32_t update_flash:1;
+	uint32_t erase_or_prog:1;
+	uint32_t version:1;
+	uint32_t acknowledgment:1;
+	uint32_t rsvd2:25;
+#endif
+
+	uint32_t dl_to_adr_low;
+	uint32_t dl_to_adr_high;
+	uint32_t dl_len;
+	union {
+		uint32_t dl_from_mbx_offset;
+		struct ulp_bde dl_from_bde;
+		struct ulp_bde64 dl_from_bde64;
+	} un;
+
+} LOAD_SM_VAR;
+
+/* Structure for MB Command READ_NVPARM (02) */
+
+typedef struct {
+	uint32_t rsvd1[3];	/* Read as all ones */
+	uint32_t rsvd2;		/* Read as all zeros */
+	uint32_t portname[2];	/* N_PORT name */
+	uint32_t nodename[2];	/* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t pref_DID:24;
+	uint32_t hardAL_PA:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t hardAL_PA:8;
+	uint32_t pref_DID:24;
+#endif
+
+	uint32_t rsvd3[21];	/* Read as all ones */
+} READ_NV_VAR;
+
+/* Structure for MB Command WRITE_NVPARMS (03) */
+
+typedef struct {
+	uint32_t rsvd1[3];	/* Must be all ones */
+	uint32_t rsvd2;		/* Must be all zeros */
+	uint32_t portname[2];	/* N_PORT name */
+	uint32_t nodename[2];	/* NODE name */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t pref_DID:24;
+	uint32_t hardAL_PA:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t hardAL_PA:8;
+	uint32_t pref_DID:24;
+#endif
+
+	uint32_t rsvd3[21];	/* Must be all ones */
+} WRITE_NV_VAR;
+
+/* Structure for MB Command RUN_BIU_DIAG (04) */
+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
+
+typedef struct {
+	uint32_t rsvd1;
+	union {
+		struct {
+			struct ulp_bde xmit_bde;
+			struct ulp_bde rcv_bde;
+		} s1;
+		struct {
+			struct ulp_bde64 xmit_bde64;
+			struct ulp_bde64 rcv_bde64;
+		} s2;
+	} un;
+} BIU_DIAG_VAR;
+
+/* Structure for MB command READ_EVENT_LOG (0x38) */
+struct READ_EVENT_LOG_VAR {
+	uint32_t word1;
+#define lpfc_event_log_SHIFT	29
+#define lpfc_event_log_MASK	0x00000001
+#define lpfc_event_log_WORD	word1
+#define USE_MAILBOX_RESPONSE	1
+	uint32_t offset;
+	struct ulp_bde64 rcv_bde64;
+};
+
+/* Structure for MB Command INIT_LINK (05) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1:24;
+	uint32_t lipsr_AL_PA:8;	/* AL_PA to issue Lip Selective Reset to */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t lipsr_AL_PA:8;	/* AL_PA to issue Lip Selective Reset to */
+	uint32_t rsvd1:24;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
+	uint8_t rsvd2;
+	uint16_t link_flags;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t link_flags;
+	uint8_t rsvd2;
+	uint8_t fabric_AL_PA;	/* If using a Fabric Assigned AL_PA */
+#endif
+
+#define FLAGS_TOPOLOGY_MODE_LOOP_PT  0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB               0x01 /* link_flags (=1) ENDEC loopback */
+#define FLAGS_TOPOLOGY_MODE_PT_PT    0x02 /* Attempt pt-pt only */
+#define FLAGS_TOPOLOGY_MODE_LOOP     0x04 /* Attempt loop only */
+#define FLAGS_TOPOLOGY_MODE_PT_LOOP  0x06 /* Attempt pt-pt then loop */
+#define	FLAGS_UNREG_LOGIN_ALL	     0x08 /* UNREG_LOGIN all on link down */
+#define FLAGS_LIRP_LILP              0x80 /* LIRP / LILP is disabled */
+
+#define FLAGS_TOPOLOGY_FAILOVER      0x0400	/* Bit 10 */
+#define FLAGS_LINK_SPEED             0x0800	/* Bit 11 */
+#define FLAGS_IMED_ABORT             0x04000	/* Bit 14 */
+
+	uint32_t link_speed;
+#define LINK_SPEED_AUTO 0x0     /* Auto selection */
+#define LINK_SPEED_1G   0x1     /* 1 Gigabaud */
+#define LINK_SPEED_2G   0x2     /* 2 Gigabaud */
+#define LINK_SPEED_4G   0x4     /* 4 Gigabaud */
+#define LINK_SPEED_8G   0x8     /* 8 Gigabaud */
+#define LINK_SPEED_10G  0x10    /* 10 Gigabaud */
+#define LINK_SPEED_16G  0x11    /* 16 Gigabaud */
+
+} INIT_LINK_VAR;
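+
+/*
+ * Illustrative sketch only: a typical INIT_LINK request that tries loop
+ * before point-to-point and lets the firmware negotiate speed.  The
+ * helper name is hypothetical.
+ */
+#if 0
+static void example_fill_init_link(INIT_LINK_VAR *il)
+{
+	il->link_flags = FLAGS_TOPOLOGY_MODE_LOOP_PT;	/* try loop, then pt-pt */
+	il->link_speed = LINK_SPEED_AUTO;		/* auto-negotiate speed */
+	/* a forced speed would also set FLAGS_LINK_SPEED in link_flags */
+}
+#endif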
+
+/* Structure for MB Command DOWN_LINK (06) */
+
+typedef struct {
+	uint32_t rsvd1;
+} DOWN_LINK_VAR;
+
+/* Structure for MB Command CONFIG_LINK (07) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cr:1;
+	uint32_t ci:1;
+	uint32_t cr_delay:6;
+	uint32_t cr_count:8;
+	uint32_t rsvd1:8;
+	uint32_t MaxBBC:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t MaxBBC:8;
+	uint32_t rsvd1:8;
+	uint32_t cr_count:8;
+	uint32_t cr_delay:6;
+	uint32_t ci:1;
+	uint32_t cr:1;
+#endif
+
+	uint32_t myId;
+	uint32_t rsvd2;
+	uint32_t edtov;
+	uint32_t arbtov;
+	uint32_t ratov;
+	uint32_t rttov;
+	uint32_t altov;
+	uint32_t crtov;
+	uint32_t citov;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rrq_enable:1;
+	uint32_t rrq_immed:1;
+	uint32_t rsvd4:29;
+	uint32_t ack0_enable:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ack0_enable:1;
+	uint32_t rsvd4:29;
+	uint32_t rrq_immed:1;
+	uint32_t rrq_enable:1;
+#endif
+} CONFIG_LINK;
+
+/* Structure for MB Command PART_SLIM (08)
+ * will be removed since SLI1 is no longer supported!
+ */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t offCiocb;
+	uint16_t numCiocb;
+	uint16_t offRiocb;
+	uint16_t numRiocb;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t numCiocb;
+	uint16_t offCiocb;
+	uint16_t numRiocb;
+	uint16_t offRiocb;
+#endif
+} RING_DEF;
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t unused1:24;
+	uint32_t numRing:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t numRing:8;
+	uint32_t unused1:24;
+#endif
+
+	RING_DEF ringdef[4];
+	uint32_t hbainit;
+} PART_SLIM_VAR;
+
+/* Structure for MB Command CONFIG_RING (09) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t unused2:6;
+	uint32_t recvSeq:1;
+	uint32_t recvNotify:1;
+	uint32_t numMask:8;
+	uint32_t profile:8;
+	uint32_t unused1:4;
+	uint32_t ring:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ring:4;
+	uint32_t unused1:4;
+	uint32_t profile:8;
+	uint32_t numMask:8;
+	uint32_t recvNotify:1;
+	uint32_t recvSeq:1;
+	uint32_t unused2:6;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t maxRespXchg;
+	uint16_t maxOrigXchg;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t maxOrigXchg;
+	uint16_t maxRespXchg;
+#endif
+
+	RR_REG rrRegs[6];
+} CONFIG_RING_VAR;
+
+/* Structure for MB Command RESET_RING (10) */
+
+typedef struct {
+	uint32_t ring_no;
+} RESET_RING_VAR;
+
+/* Structure for MB Command READ_CONFIG (11) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cr:1;
+	uint32_t ci:1;
+	uint32_t cr_delay:6;
+	uint32_t cr_count:8;
+	uint32_t InitBBC:8;
+	uint32_t MaxBBC:8;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t MaxBBC:8;
+	uint32_t InitBBC:8;
+	uint32_t cr_count:8;
+	uint32_t cr_delay:6;
+	uint32_t ci:1;
+	uint32_t cr:1;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t topology:8;
+	uint32_t myDid:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myDid:24;
+	uint32_t topology:8;
+#endif
+
+	/* Defines for topology (defined previously) */
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t AR:1;
+	uint32_t IR:1;
+	uint32_t rsvd1:29;
+	uint32_t ack0:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ack0:1;
+	uint32_t rsvd1:29;
+	uint32_t IR:1;
+	uint32_t AR:1;
+#endif
+
+	uint32_t edtov;
+	uint32_t arbtov;
+	uint32_t ratov;
+	uint32_t rttov;
+	uint32_t altov;
+	uint32_t lmt;
+#define LMT_RESERVED  0x000    /* Not used */
+#define LMT_1Gb       0x004
+#define LMT_2Gb       0x008
+#define LMT_4Gb       0x040
+#define LMT_8Gb       0x080
+#define LMT_10Gb      0x100
+#define LMT_16Gb      0x200
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t max_xri;
+	uint32_t max_iocb;
+	uint32_t max_rpi;
+	uint32_t avail_xri;
+	uint32_t avail_iocb;
+	uint32_t avail_rpi;
+	uint32_t max_vpi;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint32_t avail_vpi;
+} READ_CONFIG_VAR;
+
+/* Structure for MB Command READ_RCONFIG (12) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:7;
+	uint32_t recvNotify:1;
+	uint32_t numMask:8;
+	uint32_t profile:8;
+	uint32_t rsvd1:4;
+	uint32_t ring:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ring:4;
+	uint32_t rsvd1:4;
+	uint32_t profile:8;
+	uint32_t numMask:8;
+	uint32_t recvNotify:1;
+	uint32_t rsvd2:7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t maxResp;
+	uint16_t maxOrig;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t maxOrig;
+	uint16_t maxResp;
+#endif
+
+	RR_REG rrRegs[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t cmdRingOffset;
+	uint16_t cmdEntryCnt;
+	uint16_t rspRingOffset;
+	uint16_t rspEntryCnt;
+	uint16_t nextCmdOffset;
+	uint16_t rsvd3;
+	uint16_t nextRspOffset;
+	uint16_t rsvd4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t cmdEntryCnt;
+	uint16_t cmdRingOffset;
+	uint16_t rspEntryCnt;
+	uint16_t rspRingOffset;
+	uint16_t rsvd3;
+	uint16_t nextCmdOffset;
+	uint16_t rsvd4;
+	uint16_t nextRspOffset;
+#endif
+} READ_RCONF_VAR;
+
+/* Structure for MB Command READ_SPARM (13) */
+/* Structure for MB Command READ_SPARM64 (0x8D) */
+
+typedef struct {
+	uint32_t rsvd1;
+	uint32_t rsvd2;
+	union {
+		struct ulp_bde sp; /* This BDE points to the serv_parm
+				      structure */
+		struct ulp_bde64 sp64;
+	} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd3;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t vpi;
+	uint16_t rsvd3;
+#endif
+} READ_SPARM_VAR;
+
+/* Structure for MB Command READ_STATUS (14) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1:31;
+	uint32_t clrCounters:1;
+	uint16_t activeXriCnt;
+	uint16_t activeRpiCnt;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t clrCounters:1;
+	uint32_t rsvd1:31;
+	uint16_t activeRpiCnt;
+	uint16_t activeXriCnt;
+#endif
+
+	uint32_t xmitByteCnt;
+	uint32_t rcvByteCnt;
+	uint32_t xmitFrameCnt;
+	uint32_t rcvFrameCnt;
+	uint32_t xmitSeqCnt;
+	uint32_t rcvSeqCnt;
+	uint32_t totalOrigExchanges;
+	uint32_t totalRespExchanges;
+	uint32_t rcvPbsyCnt;
+	uint32_t rcvFbsyCnt;
+} READ_STATUS_VAR;
+
+/* Structure for MB Command READ_RPI (15) */
+/* Structure for MB Command READ_RPI64 (0x8F) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t nextRpi;
+	uint16_t reqRpi;
+	uint32_t rsvd2:8;
+	uint32_t DID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t reqRpi;
+	uint16_t nextRpi;
+	uint32_t DID:24;
+	uint32_t rsvd2:8;
+#endif
+
+	union {
+		struct ulp_bde sp;
+		struct ulp_bde64 sp64;
+	} un;
+
+} READ_RPI_VAR;
+
+/* Structure for MB Command READ_XRI (16) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t nextXri;
+	uint16_t reqXri;
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2:8;
+	uint32_t DID:24;
+	uint32_t rsvd3:8;
+	uint32_t SID:24;
+	uint32_t rsvd4;
+	uint8_t seqId;
+	uint8_t rsvd5;
+	uint16_t seqCount;
+	uint16_t oxId;
+	uint16_t rxId;
+	uint32_t rsvd6:30;
+	uint32_t si:1;
+	uint32_t exchOrig:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t reqXri;
+	uint16_t nextXri;
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t DID:24;
+	uint32_t rsvd2:8;
+	uint32_t SID:24;
+	uint32_t rsvd3:8;
+	uint32_t rsvd4;
+	uint16_t seqCount;
+	uint8_t rsvd5;
+	uint8_t seqId;
+	uint16_t rxId;
+	uint16_t oxId;
+	uint32_t exchOrig:1;
+	uint32_t si:1;
+	uint32_t rsvd6:30;
+#endif
+} READ_XRI_VAR;
+
+/* Structure for MB Command READ_REV (17) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cv:1;
+	uint32_t rr:1;
+	uint32_t rsvd2:2;
+	uint32_t v3req:1;
+	uint32_t v3rsp:1;
+	uint32_t rsvd1:25;
+	uint32_t rv:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rv:1;
+	uint32_t rsvd1:25;
+	uint32_t v3rsp:1;
+	uint32_t v3req:1;
+	uint32_t rsvd2:2;
+	uint32_t rr:1;
+	uint32_t cv:1;
+#endif
+
+	uint32_t biuRev;
+	uint32_t smRev;
+	union {
+		uint32_t smFwRev;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint8_t ProgType;
+			uint8_t ProgId;
+			uint16_t ProgVer:4;
+			uint16_t ProgRev:4;
+			uint16_t ProgFixLvl:2;
+			uint16_t ProgDistType:2;
+			uint16_t DistCnt:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t DistCnt:4;
+			uint16_t ProgDistType:2;
+			uint16_t ProgFixLvl:2;
+			uint16_t ProgRev:4;
+			uint16_t ProgVer:4;
+			uint8_t ProgId;
+			uint8_t ProgType;
+#endif
+
+		} b;
+	} un;
+	uint32_t endecRev;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t feaLevelHigh;
+	uint8_t feaLevelLow;
+	uint8_t fcphHigh;
+	uint8_t fcphLow;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t fcphLow;
+	uint8_t fcphHigh;
+	uint8_t feaLevelLow;
+	uint8_t feaLevelHigh;
+#endif
+
+	uint32_t postKernRev;
+	uint32_t opFwRev;
+	uint8_t opFwName[16];
+	uint32_t sli1FwRev;
+	uint8_t sli1FwName[16];
+	uint32_t sli2FwRev;
+	uint8_t sli2FwName[16];
+	uint32_t sli3Feat;
+	uint32_t RandomData[6];
+} READ_REV_VAR;
+
+/* Structure for MB Command READ_LINK_STAT (18) */
+
+typedef struct {
+	uint32_t rsvd1;
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+	uint32_t primSeqTimeout;
+	uint32_t elasticOverrun;
+	uint32_t arbTimeout;
+} READ_LNK_VAR;
+
+/* Structure for MB Command REG_LOGIN (19) */
+/* Structure for MB Command REG_LOGIN64 (0x93) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2:8;
+	uint32_t did:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t did:24;
+	uint32_t rsvd2:8;
+#endif
+
+	union {
+		struct ulp_bde sp;
+		struct ulp_bde64 sp64;
+	} un;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+
+} REG_LOGIN_VAR;
+
+/* Word 30 contents for REG_LOGIN */
+typedef union {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint16_t rsvd1:12;
+		uint16_t wd30_class:4;
+		uint16_t xri;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+		uint16_t xri;
+		uint16_t wd30_class:4;
+		uint16_t rsvd1:12;
+#endif
+	} f;
+	uint32_t word;
+} REG_WD30;
+
+/* Structure for MB Command UNREG_LOGIN (20) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd1;
+	uint16_t rpi;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t rpi;
+	uint16_t rsvd1;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_LOGIN_VAR;
+
+/* Structure for MB Command REG_VPI (0x96) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1;
+	uint32_t rsvd2:7;
+	uint32_t upd:1;
+	uint32_t sid:24;
+	uint32_t wwn[2];
+	uint32_t rsvd5;
+	uint16_t vfi;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd1;
+	uint32_t sid:24;
+	uint32_t upd:1;
+	uint32_t rsvd2:7;
+	uint32_t wwn[2];
+	uint32_t rsvd5;
+	uint16_t vpi;
+	uint16_t vfi;
+#endif
+} REG_VPI_VAR;
+
+/* Structure for MB Command UNREG_VPI (0x97) */
+typedef struct {
+	uint32_t rsvd1;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd2;
+	uint16_t sli4_vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint16_t sli4_vpi;
+	uint16_t rsvd2;
+#endif
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else	/*  __LITTLE_ENDIAN */
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_VPI_VAR;
+
+/* Structure for MB Command UNREG_D_ID (0x23) */
+
+typedef struct {
+	uint32_t did;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t rsvd4;
+	uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t rsvd6;
+	uint16_t vpi;
+#else
+	uint16_t vpi;
+	uint16_t rsvd6;
+#endif
+} UNREG_D_ID_VAR;
+
+/* Structure for MB Command READ_TOPOLOGY (0x95) */
+struct lpfc_mbx_read_top {
+	uint32_t eventTag;	/* Event tag */
+	uint32_t word2;
+#define lpfc_mbx_read_top_fa_SHIFT		12
+#define lpfc_mbx_read_top_fa_MASK		0x00000001
+#define lpfc_mbx_read_top_fa_WORD		word2
+#define lpfc_mbx_read_top_mm_SHIFT		11
+#define lpfc_mbx_read_top_mm_MASK		0x00000001
+#define lpfc_mbx_read_top_mm_WORD		word2
+#define lpfc_mbx_read_top_pb_SHIFT		9
+#define lpfc_mbx_read_top_pb_MASK		0x00000001
+#define lpfc_mbx_read_top_pb_WORD		word2
+#define lpfc_mbx_read_top_il_SHIFT		8
+#define lpfc_mbx_read_top_il_MASK		0x00000001
+#define lpfc_mbx_read_top_il_WORD		word2
+#define lpfc_mbx_read_top_att_type_SHIFT	0
+#define lpfc_mbx_read_top_att_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_att_type_WORD		word2
+#define LPFC_ATT_RESERVED    0x00	/* Reserved - attType */
+#define LPFC_ATT_LINK_UP     0x01	/* Link is up */
+#define LPFC_ATT_LINK_DOWN   0x02	/* Link is down */
+	uint32_t word3;
+#define lpfc_mbx_read_top_alpa_granted_SHIFT	24
+#define lpfc_mbx_read_top_alpa_granted_MASK	0x000000FF
+#define lpfc_mbx_read_top_alpa_granted_WORD	word3
+#define lpfc_mbx_read_top_lip_alps_SHIFT	16
+#define lpfc_mbx_read_top_lip_alps_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_alps_WORD		word3
+#define lpfc_mbx_read_top_lip_type_SHIFT	8
+#define lpfc_mbx_read_top_lip_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_type_WORD		word3
+#define lpfc_mbx_read_top_topology_SHIFT	0
+#define lpfc_mbx_read_top_topology_MASK		0x000000FF
+#define lpfc_mbx_read_top_topology_WORD		word3
+#define LPFC_TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
+#define LPFC_TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
+#define LPFC_TOPOLOGY_MM    0x05	/* maintenance mode, Zephyr to Menlo */
+	/* store the LILP AL_PA position map into */
+	struct ulp_bde64 lilpBde64;
+#define LPFC_ALPA_MAP_SIZE	128
+	uint32_t word7;
+#define lpfc_mbx_read_top_ld_lu_SHIFT		31
+#define lpfc_mbx_read_top_ld_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_lu_WORD		word7
+#define lpfc_mbx_read_top_ld_tf_SHIFT		30
+#define lpfc_mbx_read_top_ld_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_tf_WORD		word7
+#define lpfc_mbx_read_top_ld_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_ld_link_spd_MASK	0x000000FF
+#define lpfc_mbx_read_top_ld_link_spd_WORD	word7
+#define lpfc_mbx_read_top_ld_nl_port_SHIFT	4
+#define lpfc_mbx_read_top_ld_nl_port_MASK	0x0000000F
+#define lpfc_mbx_read_top_ld_nl_port_WORD	word7
+#define lpfc_mbx_read_top_ld_tx_SHIFT		2
+#define lpfc_mbx_read_top_ld_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_tx_WORD		word7
+#define lpfc_mbx_read_top_ld_rx_SHIFT		0
+#define lpfc_mbx_read_top_ld_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_rx_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_top_lu_SHIFT		31
+#define lpfc_mbx_read_top_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_lu_WORD		word8
+#define lpfc_mbx_read_top_tf_SHIFT		30
+#define lpfc_mbx_read_top_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_tf_WORD		word8
+#define lpfc_mbx_read_top_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_link_spd_MASK		0x000000FF
+#define lpfc_mbx_read_top_link_spd_WORD		word8
+#define lpfc_mbx_read_top_nl_port_SHIFT		4
+#define lpfc_mbx_read_top_nl_port_MASK		0x0000000F
+#define lpfc_mbx_read_top_nl_port_WORD		word8
+#define lpfc_mbx_read_top_tx_SHIFT		2
+#define lpfc_mbx_read_top_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_tx_WORD		word8
+#define lpfc_mbx_read_top_rx_SHIFT		0
+#define lpfc_mbx_read_top_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_rx_WORD		word8
+#define LPFC_LINK_SPEED_UNKNOWN	0x0
+#define LPFC_LINK_SPEED_1GHZ	0x04
+#define LPFC_LINK_SPEED_2GHZ	0x08
+#define LPFC_LINK_SPEED_4GHZ	0x10
+#define LPFC_LINK_SPEED_8GHZ	0x20
+#define LPFC_LINK_SPEED_10GHZ	0x40
+#define LPFC_LINK_SPEED_16GHZ	0x80
+};
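+
+/*
+ * Illustrative sketch only, reusing the hypothetical bf_get_ex() accessor
+ * sketched earlier in this file: decoding the attention type and topology
+ * reported by READ_TOPOLOGY.
+ */
+#if 0
+static int example_link_is_loop(struct lpfc_mbx_read_top *la)
+{
+	if (bf_get_ex(lpfc_mbx_read_top_att_type, la) != LPFC_ATT_LINK_UP)
+		return 0;
+	return bf_get_ex(lpfc_mbx_read_top_topology, la) == LPFC_TOPOLOGY_LOOP;
+}
+#endif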
+
+/* Structure for MB Command CLEAR_LA (22) */
+
+typedef struct {
+	uint32_t eventTag;	/* Event tag */
+	uint32_t rsvd1;
+} CLEAR_LA_VAR;
+
+/* Structure for MB Command DUMP */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd:25;
+	uint32_t ra:1;
+	uint32_t co:1;
+	uint32_t cv:1;
+	uint32_t type:4;
+	uint32_t entry_index:16;
+	uint32_t region_id:16;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t type:4;
+	uint32_t cv:1;
+	uint32_t co:1;
+	uint32_t ra:1;
+	uint32_t rsvd:25;
+	uint32_t region_id:16;
+	uint32_t entry_index:16;
+#endif
+
+	uint32_t sli4_length;
+	uint32_t word_cnt;
+	uint32_t resp_offset;
+} DUMP_VAR;
+
+#define  DMP_MEM_REG             0x1
+#define  DMP_NV_PARAMS           0x2
+#define  DMP_LMSD                0x3 /* Link Module Serial Data */
+#define  DMP_WELL_KNOWN          0x4
+
+#define  DMP_REGION_VPD          0xe
+#define  DMP_VPD_SIZE            0x400  /* maximum amount of VPD */
+#define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
+#define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
+
+#define  DMP_REGION_VPORT	 0x16   /* VPort info region */
+#define  DMP_VPORT_REGION_SIZE	 0x200
+#define  DMP_MBOX_OFFSET_WORD	 0x5
+
+#define  DMP_REGION_23		 0x17   /* fcoe param  and port state region */
+#define  DMP_RGN23_SIZE		 0x400
+
+#define  WAKE_UP_PARMS_REGION_ID    4
+#define  WAKE_UP_PARMS_WORD_SIZE   15
+
+struct vport_rec {
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+	uint32_t		signature;
+	uint32_t		rev;
+	struct vport_rec	vport_list[MAX_STATIC_VPORT_COUNT];
+	uint32_t		resvd[66];
+};
+
+/* Option rom version structure */
+struct prog_id {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t  type;
+	uint8_t  id;
+	uint32_t ver:4;  /* Major Version */
+	uint32_t rev:4;  /* Revision */
+	uint32_t lev:2;  /* Level */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t num:4;  /* number after dist type */
+#else /*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t num:4;  /* number after dist type */
+	uint32_t dist:2; /* Dist Type */
+	uint32_t lev:2;  /* Level */
+	uint32_t rev:4;  /* Revision */
+	uint32_t ver:4;  /* Major Version */
+	uint8_t  id;
+	uint8_t  type;
+#endif
+};
+
+/* Structure for MB Command UPDATE_CFG (0x1B) */
+
+struct update_cfg_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd2:16;
+	uint32_t type:8;
+	uint32_t rsvd:1;
+	uint32_t ra:1;
+	uint32_t co:1;
+	uint32_t cv:1;
+	uint32_t req:4;
+	uint32_t entry_length:16;
+	uint32_t region_id:16;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t req:4;
+	uint32_t cv:1;
+	uint32_t co:1;
+	uint32_t ra:1;
+	uint32_t rsvd:1;
+	uint32_t type:8;
+	uint32_t rsvd2:16;
+	uint32_t region_id:16;
+	uint32_t entry_length:16;
+#endif
+
+	uint32_t resp_info;
+	uint32_t byte_cnt;
+	uint32_t data_offset;
+};
+
+struct hbq_mask {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t tmatch;
+	uint8_t tmask;
+	uint8_t rctlmatch;
+	uint8_t rctlmask;
+#else	/*  __LITTLE_ENDIAN */
+	uint8_t rctlmask;
+	uint8_t rctlmatch;
+	uint8_t tmask;
+	uint8_t tmatch;
+#endif
+};
+
+
+/* Structure for MB Command CONFIG_HBQ (7c) */
+
+struct config_hbq_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1      :7;
+	uint32_t recvNotify :1;     /* Receive Notification */
+	uint32_t numMask    :8;     /* # Mask Entries       */
+	uint32_t profile    :8;     /* Selection Profile    */
+	uint32_t rsvd2      :8;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd2      :8;
+	uint32_t profile    :8;     /* Selection Profile    */
+	uint32_t numMask    :8;     /* # Mask Entries       */
+	uint32_t recvNotify :1;     /* Receive Notification */
+	uint32_t rsvd1      :7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t hbqId      :16;
+	uint32_t rsvd3      :12;
+	uint32_t ringMask   :4;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t ringMask   :4;
+	uint32_t rsvd3      :12;
+	uint32_t hbqId      :16;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t entry_count :16;
+	uint32_t rsvd4        :8;
+	uint32_t headerLen    :8;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t headerLen    :8;
+	uint32_t rsvd4        :8;
+	uint32_t entry_count :16;
+#endif
+
+	uint32_t hbqaddrLow;
+	uint32_t hbqaddrHigh;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd5      :31;
+	uint32_t logEntry   :1;
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t logEntry   :1;
+	uint32_t rsvd5      :31;
+#endif
+
+	uint32_t rsvd6;    /* w7 */
+	uint32_t rsvd7;    /* w8 */
+	uint32_t rsvd8;    /* w9 */
+
+	struct hbq_mask hbqMasks[6];
+
+
+	union {
+		uint32_t allprofiles[12];
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	rsvd1		:28;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:28;
+			#endif
+			uint32_t rsvd[10];
+		} profile2;
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	cmdcodeoff	:28;
+				uint32_t	rsvd1		:12;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:12;
+				uint32_t	cmdcodeoff	:28;
+			#endif
+			uint32_t cmdmatch[8];
+
+			uint32_t rsvd[2];
+		} profile3;
+
+		struct {
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	seqlenoff	:16;
+				uint32_t	maxlen		:16;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	maxlen		:16;
+				uint32_t	seqlenoff	:16;
+			#endif
+			#ifdef __BIG_ENDIAN_BITFIELD
+				uint32_t	cmdcodeoff	:28;
+				uint32_t	rsvd1		:12;
+				uint32_t	seqlenbcnt	:4;
+			#else	/*  __LITTLE_ENDIAN */
+				uint32_t	seqlenbcnt	:4;
+				uint32_t	rsvd1		:12;
+				uint32_t	cmdcodeoff	:28;
+			#endif
+			uint32_t cmdmatch[8];
+
+			uint32_t rsvd[2];
+		} profile5;
+
+	} profiles;
+
+};
+
+
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t cBE       :  1;
+	uint32_t cET       :  1;
+	uint32_t cHpcb     :  1;
+	uint32_t cMA       :  1;
+	uint32_t sli_mode  :  4;
+	uint32_t pcbLen    : 24;       /* bit 23:0  of memory based port
+					* config block */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t pcbLen    : 24;       /* bit 23:0  of memory based port
+					* config block */
+	uint32_t sli_mode  :  4;
+	uint32_t cMA       :  1;
+	uint32_t cHpcb     :  1;
+	uint32_t cET       :  1;
+	uint32_t cBE       :  1;
+#endif
+
+	uint32_t pcbLow;       /* bit 31:0  of memory based port config block */
+	uint32_t pcbHigh;      /* bit 63:32 of memory based port config block */
+	uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+	uint32_t rsvd	   : 31; /* least significant 31 bits of word 9 */
+#else   /*  __LITTLE_ENDIAN */
+	uint32_t rsvd      : 31; /* least significant 31 bits of word 9 */
+	uint32_t hps	   :  1; /* bit 31 word9 Host Pointer in slim */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t cmv       :  1;  /* Configure Max VPIs                   */
+	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
+	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
+	uint32_t chbs      :  1;  /* Configure Host Backing store         */
+	uint32_t cinb      :  1;  /* Enable Interrupt Notification Block  */
+	uint32_t cerbm	   :  1;  /* Configure Enhanced Receive Buf Mgmt  */
+	uint32_t cmx	   :  1;  /* Configure Max XRIs                   */
+	uint32_t cmr	   :  1;  /* Configure Max RPIs                   */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t cmr	   :  1;  /* Configure Max RPIs                   */
+	uint32_t cmx	   :  1;  /* Configure Max XRIs                   */
+	uint32_t cerbm	   :  1;  /* Configure Enhanced Receive Buf Mgmt  */
+	uint32_t cinb      :  1;  /* Enable Interrupt Notification Block  */
+	uint32_t chbs      :  1;  /* Configure Host Backing store         */
+	uint32_t csah      :  1;  /* Configure Synchronous Abort Handling */
+	uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
+	uint32_t cmv	   :  1;  /* Configure Max VPIs                   */
+	uint32_t cbg       :  1;  /* Configure BlockGuard                 */
+	uint32_t rsvd2     :  2;  /* Reserved                             */
+	uint32_t casabt    :  1;  /* Configure async abts status notice   */
+	uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+	uint32_t rsvd1     : 19;  /* Reserved                             */
+#endif
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
+	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
+	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
+	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
+	uint32_t ghbs	   :  1;  /* Grant Host Backing Store             */
+	uint32_t ginb	   :  1;  /* Grant Interrupt Notification Block   */
+	uint32_t gerbm	   :  1;  /* Grant ERBM Request                   */
+	uint32_t gmx	   :  1;  /* Grant Max XRIs                       */
+	uint32_t gmr	   :  1;  /* Grant Max RPIs                       */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t gmr	   :  1;  /* Grant Max RPIs                       */
+	uint32_t gmx	   :  1;  /* Grant Max XRIs                       */
+	uint32_t gerbm	   :  1;  /* Grant ERBM Request                   */
+	uint32_t ginb	   :  1;  /* Grant Interrupt Notification Block   */
+	uint32_t ghbs	   :  1;  /* Grant Host Backing Store             */
+	uint32_t gsah	   :  1;  /* Grant Synchronous Abort Handling     */
+	uint32_t gcrp	   :  1;  /* Grant Command Ring Polling           */
+	uint32_t gmv	   :  1;  /* Grant Max VPIs                       */
+	uint32_t gbg       :  1;  /* Grant BlockGuard                     */
+	uint32_t rsvd4     :  2;  /* Reserved                             */
+	uint32_t gasabt    :  1;  /* Grant async abts status notice       */
+	uint32_t gdss      :  1;  /* Grant Data Security SLI              */
+	uint32_t rsvd3     : 19;  /* Reserved                             */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t max_rpi   : 16;  /* Max RPIs Port should configure       */
+	uint32_t max_xri   : 16;  /* Max XRIs Port should configure       */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t max_xri   : 16;  /* Max XRIs Port should configure       */
+	uint32_t max_rpi   : 16;  /* Max RPIs Port should configure       */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t max_hbq   : 16;  /* Max HBQs Host expects to configure   */
+	uint32_t rsvd5     : 16;  /* Reserved                             */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t rsvd5     : 16;  /* Reserved                             */
+	uint32_t max_hbq   : 16;  /* Max HBQs Host expects to configure   */
+#endif
+
+	uint32_t rsvd6;           /* Reserved                             */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t fips_rev   : 3;   /* FIPS Spec Revision                   */
+	uint32_t fips_level : 4;   /* FIPS Level                           */
+	uint32_t sec_err    : 9;   /* security crypto error                */
+	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
+#else	/*  __LITTLE_ENDIAN */
+	uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
+	uint32_t sec_err    : 9;   /* security crypto error                */
+	uint32_t fips_level : 4;   /* FIPS Level                           */
+	uint32_t fips_rev   : 3;   /* FIPS Spec Revision                   */
+#endif
+
+} CONFIG_PORT_VAR;
+
+/* Structure for MB Command CONFIG_MSI (0x30) */
+struct config_msi_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t dfltMsgNum:8;	/* Default message number            */
+	uint32_t rsvd1:11;	/* Reserved                          */
+	uint32_t NID:5;		/* Number of secondary attention IDs */
+	uint32_t rsvd2:5;	/* Reserved                          */
+	uint32_t dfltPresent:1;	/* Default message number present    */
+	uint32_t addFlag:1;	/* Add association flag              */
+	uint32_t reportFlag:1;	/* Report association flag           */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t reportFlag:1;	/* Report association flag           */
+	uint32_t addFlag:1;	/* Add association flag              */
+	uint32_t dfltPresent:1;	/* Default message number present    */
+	uint32_t rsvd2:5;	/* Reserved                          */
+	uint32_t NID:5;		/* Number of secondary attention IDs */
+	uint32_t rsvd1:11;	/* Reserved                          */
+	uint32_t dfltMsgNum:8;	/* Default message number            */
+#endif
+	uint32_t attentionConditions[2];
+	uint8_t  attentionId[16];
+	uint8_t  messageNumberByHA[64];
+	uint8_t  messageNumberByID[16];
+	uint32_t autoClearHA[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd3:16;
+	uint32_t autoClearID:16;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t autoClearID:16;
+	uint32_t rsvd3:16;
+#endif
+	uint32_t rsvd4;
+};
+
+/* SLI-2 Port Control Block */
+
+/* SLIM POINTER */
+#define SLIMOFF 0x30		/* WORD */
+
+typedef struct _SLI2_RDSC {
+	uint32_t cmdEntries;
+	uint32_t cmdAddrLow;
+	uint32_t cmdAddrHigh;
+
+	uint32_t rspEntries;
+	uint32_t rspAddrLow;
+	uint32_t rspAddrHigh;
+} SLI2_RDSC;
+
+typedef struct _PCB {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+	uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+	uint32_t rsvd:12;
+	uint32_t maxRing:4;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t maxRing:4;
+	uint32_t rsvd:12;
+	uint32_t feature:8;
+#define FEATURE_INITIAL_SLI2   0x01
+	uint32_t type:8;
+#define TYPE_NATIVE_SLI2       0x01
+#endif
+
+	uint32_t mailBoxSize;
+	uint32_t mbAddrLow;
+	uint32_t mbAddrHigh;
+
+	uint32_t hgpAddrLow;
+	uint32_t hgpAddrHigh;
+
+	uint32_t pgpAddrLow;
+	uint32_t pgpAddrHigh;
+	SLI2_RDSC rdsc[MAX_RINGS];
+} PCB_t;
+
+/* NEW_FEATURE */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd0:27;
+	uint32_t discardFarp:1;
+	uint32_t IPEnable:1;
+	uint32_t nodeName:1;
+	uint32_t portName:1;
+	uint32_t filterEnable:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t filterEnable:1;
+	uint32_t portName:1;
+	uint32_t nodeName:1;
+	uint32_t IPEnable:1;
+	uint32_t discardFarp:1;
+	uint32_t rsvd0:27;
+#endif
+
+	uint8_t portname[8];	/* Used to be struct lpfc_name */
+	uint8_t nodename[8];
+	uint32_t rsvd1;
+	uint32_t rsvd2;
+	uint32_t rsvd3;
+	uint32_t IPAddress;
+} CONFIG_FARP_VAR;
+
+/* Structure for MB Command MBX_ASYNCEVT_ENABLE (0x33) */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rsvd:30;
+	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
+#else /*  __LITTLE_ENDIAN */
+	uint32_t ring:2;	/* Ring for ASYNC_EVENT iocb Bits 0-1*/
+	uint32_t rsvd:30;
+#endif
+} ASYNCEVT_ENABLE_VAR;
+
+/* Union of all Mailbox Command types */
+#define MAILBOX_CMD_WSIZE	32
+#define MAILBOX_CMD_SIZE	(MAILBOX_CMD_WSIZE * sizeof(uint32_t))
+/* ext_wsize times 4 bytes should not be greater than max xmit size */
+#define MAILBOX_EXT_WSIZE	512
+#define MAILBOX_EXT_SIZE	(MAILBOX_EXT_WSIZE * sizeof(uint32_t))
+#define MAILBOX_HBA_EXT_OFFSET  0x100
+/* max mbox xmit size is a page size for sysfs IO operations */
+#define MAILBOX_SYSFS_MAX	4096
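+/* Informational check on the limits above: MAILBOX_EXT_WSIZE is 512 words,
+ * so MAILBOX_EXT_SIZE = 512 * sizeof(uint32_t) = 2048 bytes, comfortably
+ * below the 4096-byte MAILBOX_SYSFS_MAX transmit limit.
+ */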
+
+typedef union {
+	uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+						    * feature/max ring number
+						    */
+	LOAD_SM_VAR varLdSM;		/* cmd =  1 (LOAD_SM)        */
+	READ_NV_VAR varRDnvp;		/* cmd =  2 (READ_NVPARMS)   */
+	WRITE_NV_VAR varWTnvp;		/* cmd =  3 (WRITE_NVPARMS)  */
+	BIU_DIAG_VAR varBIUdiag;	/* cmd =  4 (RUN_BIU_DIAG)   */
+	INIT_LINK_VAR varInitLnk;	/* cmd =  5 (INIT_LINK)      */
+	DOWN_LINK_VAR varDwnLnk;	/* cmd =  6 (DOWN_LINK)      */
+	CONFIG_LINK varCfgLnk;		/* cmd =  7 (CONFIG_LINK)    */
+	PART_SLIM_VAR varSlim;		/* cmd =  8 (PART_SLIM)      */
+	CONFIG_RING_VAR varCfgRing;	/* cmd =  9 (CONFIG_RING)    */
+	RESET_RING_VAR varRstRing;	/* cmd = 10 (RESET_RING)     */
+	READ_CONFIG_VAR varRdConfig;	/* cmd = 11 (READ_CONFIG)    */
+	READ_RCONF_VAR varRdRConfig;	/* cmd = 12 (READ_RCONFIG)   */
+	READ_SPARM_VAR varRdSparm;	/* cmd = 13 (READ_SPARM(64)) */
+	READ_STATUS_VAR varRdStatus;	/* cmd = 14 (READ_STATUS)    */
+	READ_RPI_VAR varRdRPI;		/* cmd = 15 (READ_RPI(64))   */
+	READ_XRI_VAR varRdXRI;		/* cmd = 16 (READ_XRI)       */
+	READ_REV_VAR varRdRev;		/* cmd = 17 (READ_REV)       */
+	READ_LNK_VAR varRdLnk;		/* cmd = 18 (READ_LNK_STAT)  */
+	REG_LOGIN_VAR varRegLogin;	/* cmd = 19 (REG_LOGIN(64))  */
+	UNREG_LOGIN_VAR varUnregLogin;	/* cmd = 20 (UNREG_LOGIN)    */
+	CLEAR_LA_VAR varClearLA;	/* cmd = 22 (CLEAR_LA)       */
+	DUMP_VAR varDmp;		/* Warm Start DUMP mbx cmd   */
+	UNREG_D_ID_VAR varUnregDID;	/* cmd = 0x23 (UNREG_D_ID)   */
+	CONFIG_FARP_VAR varCfgFarp;	/* cmd = 0x25 (CONFIG_FARP)
+					 * NEW_FEATURE
+					 */
+	struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ)  */
+	struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
+	CONFIG_PORT_VAR varCfgPort;	/* cmd = 0x88 (CONFIG_PORT)  */
+	struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
+	REG_VPI_VAR varRegVpi;		/* cmd = 0x96 (REG_VPI) */
+	UNREG_VPI_VAR varUnregVpi;	/* cmd = 0x97 (UNREG_VPI) */
+	ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+	struct READ_EVENT_LOG_VAR varRdEventLog;	/* cmd = 0x38
+							 * (READ_EVENT_LOG)
+							 */
+	struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI)     */
+} MAILVARIANTS;
+
+/*
+ * SLI-2 specific structures
+ */
+
+struct lpfc_hgp {
+	__le32 cmdPutInx;
+	__le32 rspGetInx;
+};
+
+struct lpfc_pgp {
+	__le32 cmdGetInx;
+	__le32 rspPutInx;
+};
+
+struct sli2_desc {
+	uint32_t unused1[16];
+	struct lpfc_hgp host[MAX_RINGS];
+	struct lpfc_pgp port[MAX_RINGS];
+};
+
+struct sli3_desc {
+	struct lpfc_hgp host[MAX_RINGS];
+	uint32_t reserved[8];
+	uint32_t hbq_put[16];
+};
+
+struct sli3_pgp {
+	struct lpfc_pgp port[MAX_RINGS];
+	uint32_t hbq_get[16];
+};
+
+union sli_var {
+	struct sli2_desc	s2;
+	struct sli3_desc	s3;
+	struct sli3_pgp		s3_pgp;
+};
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t mbxStatus;
+	uint8_t mbxCommand;
+	uint8_t mbxReserved:6;
+	uint8_t mbxHc:1;
+	uint8_t mbxOwner:1;	/* Low order bit first word */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t mbxOwner:1;	/* Low order bit first word */
+	uint8_t mbxHc:1;
+	uint8_t mbxReserved:6;
+	uint8_t mbxCommand;
+	uint16_t mbxStatus;
+#endif
+
+	MAILVARIANTS un;
+	union sli_var us;
+} MAILBOX_t;
+
+/*
+ *    Begin Structure Definitions for IOCB Commands
+ */
+
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t statAction;
+	uint8_t statRsn;
+	uint8_t statBaExp;
+	uint8_t statLocalError;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint8_t statLocalError;
+	uint8_t statBaExp;
+	uint8_t statRsn;
+	uint8_t statAction;
+#endif
+	/* statRsn  P/F_RJT reason codes */
+#define RJT_BAD_D_ID       0x01	/* Invalid D_ID field */
+#define RJT_BAD_S_ID       0x02	/* Invalid S_ID field */
+#define RJT_UNAVAIL_TEMP   0x03	/* N_Port unavailable temp. */
+#define RJT_UNAVAIL_PERM   0x04	/* N_Port unavailable perm. */
+#define RJT_UNSUP_CLASS    0x05	/* Class not supported */
+#define RJT_DELIM_ERR      0x06	/* Delimiter usage error */
+#define RJT_UNSUP_TYPE     0x07	/* Type not supported */
+#define RJT_BAD_CONTROL    0x08	/* Invalid link control */
+#define RJT_BAD_RCTL       0x09	/* R_CTL invalid */
+#define RJT_BAD_FCTL       0x0A	/* F_CTL invalid */
+#define RJT_BAD_OXID       0x0B	/* OX_ID invalid */
+#define RJT_BAD_RXID       0x0C	/* RX_ID invalid */
+#define RJT_BAD_SEQID      0x0D	/* SEQ_ID invalid */
+#define RJT_BAD_DFCTL      0x0E	/* DF_CTL invalid */
+#define RJT_BAD_SEQCNT     0x0F	/* SEQ_CNT invalid */
+#define RJT_BAD_PARM       0x10	/* Param. field invalid */
+#define RJT_XCHG_ERR       0x11	/* Exchange error */
+#define RJT_PROT_ERR       0x12	/* Protocol error */
+#define RJT_BAD_LENGTH     0x13	/* Invalid Length */
+#define RJT_UNEXPECTED_ACK 0x14	/* Unexpected ACK */
+#define RJT_LOGIN_REQUIRED 0x16	/* Login required */
+#define RJT_TOO_MANY_SEQ   0x17	/* Excessive sequences */
+#define RJT_XCHG_NOT_STRT  0x18	/* Exchange not started */
+#define RJT_UNSUP_SEC_HDR  0x19	/* Security hdr not supported */
+#define RJT_UNAVAIL_PATH   0x1A	/* Fabric Path not available */
+#define RJT_VENDOR_UNIQUE  0xFF	/* Vendor unique error */
+
+#define IOERR_SUCCESS                 0x00	/* statLocalError */
+#define IOERR_MISSING_CONTINUE        0x01
+#define IOERR_SEQUENCE_TIMEOUT        0x02
+#define IOERR_INTERNAL_ERROR          0x03
+#define IOERR_INVALID_RPI             0x04
+#define IOERR_NO_XRI                  0x05
+#define IOERR_ILLEGAL_COMMAND         0x06
+#define IOERR_XCHG_DROPPED            0x07
+#define IOERR_ILLEGAL_FIELD           0x08
+#define IOERR_BAD_CONTINUE            0x09
+#define IOERR_TOO_MANY_BUFFERS        0x0A
+#define IOERR_RCV_BUFFER_WAITING      0x0B
+#define IOERR_NO_CONNECTION           0x0C
+#define IOERR_TX_DMA_FAILED           0x0D
+#define IOERR_RX_DMA_FAILED           0x0E
+#define IOERR_ILLEGAL_FRAME           0x0F
+#define IOERR_EXTRA_DATA              0x10
+#define IOERR_NO_RESOURCES            0x11
+#define IOERR_RESERVED                0x12
+#define IOERR_ILLEGAL_LENGTH          0x13
+#define IOERR_UNSUPPORTED_FEATURE     0x14
+#define IOERR_ABORT_IN_PROGRESS       0x15
+#define IOERR_ABORT_REQUESTED         0x16
+#define IOERR_RECEIVE_BUFFER_TIMEOUT  0x17
+#define IOERR_LOOP_OPEN_FAILURE       0x18
+#define IOERR_RING_RESET              0x19
+#define IOERR_LINK_DOWN               0x1A
+#define IOERR_CORRUPTED_DATA          0x1B
+#define IOERR_CORRUPTED_RPI           0x1C
+#define IOERR_OUT_OF_ORDER_DATA       0x1D
+#define IOERR_OUT_OF_ORDER_ACK        0x1E
+#define IOERR_DUP_FRAME               0x1F
+#define IOERR_LINK_CONTROL_FRAME      0x20	/* ACK_N received */
+#define IOERR_BAD_HOST_ADDRESS        0x21
+#define IOERR_RCV_HDRBUF_WAITING      0x22
+#define IOERR_MISSING_HDR_BUFFER      0x23
+#define IOERR_MSEQ_CHAIN_CORRUPTED    0x24
+#define IOERR_ABORTMULT_REQUESTED     0x25
+#define IOERR_BUFFER_SHORTAGE         0x28
+#define IOERR_DEFAULT                 0x29
+#define IOERR_CNT                     0x2A
+#define IOERR_SLER_FAILURE            0x46
+#define IOERR_SLER_CMD_RCV_FAILURE    0x47
+#define IOERR_SLER_REC_RJT_ERR        0x48
+#define IOERR_SLER_REC_SRR_RETRY_ERR  0x49
+#define IOERR_SLER_SRR_RJT_ERR        0x4A
+#define IOERR_SLER_RRQ_RJT_ERR        0x4C
+#define IOERR_SLER_RRQ_RETRY_ERR      0x4D
+#define IOERR_SLER_ABTS_ERR           0x4E
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR		0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR	0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR		0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR	0xF3
+#define IOERR_DRVR_MASK               0x100
+#define IOERR_SLI_DOWN                0x101  /* ulpStatus  - Driver defined */
+#define IOERR_SLI_BRESET              0x102
+#define IOERR_SLI_ABORTED             0x103
+} PARM_ERR;
+
+typedef union {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint8_t Rctl;	/* R_CTL field */
+		uint8_t Type;	/* TYPE field */
+		uint8_t Dfctl;	/* DF_CTL field */
+		uint8_t Fctl;	/* Bits 0-7 of IOCB word 5 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+		uint8_t Fctl;	/* Bits 0-7 of IOCB word 5 */
+		uint8_t Dfctl;	/* DF_CTL field */
+		uint8_t Type;	/* TYPE field */
+		uint8_t Rctl;	/* R_CTL field */
+#endif
+
+#define BC      0x02		/* Broadcast Received  - Fctl */
+#define SI      0x04		/* Sequence Initiative */
+#define LA      0x08		/* Ignore Link Attention state */
+#define LS      0x80		/* Last Sequence */
+	} hcsw;
+	uint32_t reserved;
+} WORD5;
+
+/* IOCB Command template for a generic response */
+typedef struct {
+	uint32_t reserved[4];
+	PARM_ERR perr;
+} GENERIC_RSP;
+
+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
+typedef struct {
+	struct ulp_bde xrsqbde[2];
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} XR_SEQ_FIELDS;
+
+/* IOCB Command template for ELS_REQUEST */
+typedef struct {
+	struct ulp_bde elsReq;
+	struct ulp_bde elsRsp;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word4Rsvd:7;
+	uint32_t fl:1;
+	uint32_t myID:24;
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myID:24;
+	uint32_t fl:1;
+	uint32_t word4Rsvd:7;
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST;
+
+/* IOCB Command template for RCV_ELS_REQ */
+typedef struct {
+	struct ulp_bde elsReq[2];
+	uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ;
+
+/* IOCB Command template for ABORT / CLOSE_XRI */
+typedef struct {
+	uint32_t rsvd[3];
+	uint32_t abortType;
+#define ABORT_TYPE_ABTX  0x00000000
+#define ABORT_TYPE_ABTS  0x00000001
+	uint32_t parm;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t abortContextTag; /* ulpContext from command to abort/close */
+	uint16_t abortIoTag;	/* ulpIoTag from command to abort/close */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t abortIoTag;	/* ulpIoTag from command to abort/close */
+	uint16_t abortContextTag; /* ulpContext from command to abort/close */
+#endif
+} AC_XRI;
+
+/* IOCB Command template for ABORT_MXRI64 */
+typedef struct {
+	uint32_t rsvd[3];
+	uint32_t abortType;
+	uint32_t parm;
+	uint32_t iotag32;
+} A_MXRI64;
+
+/* IOCB Command template for GET_RPI */
+typedef struct {
+	uint32_t rsvd[4];
+	uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} GET_RPI;
+
+/* IOCB Command template for all FCP Initiator commands */
+typedef struct {
+	struct ulp_bde fcpi_cmnd;	/* FCP_CMND payload descriptor */
+	struct ulp_bde fcpi_rsp;	/* Rcv buffer */
+	uint32_t fcpi_parm;
+	uint32_t fcpi_XRdy;	/* transfer ready for IWRITE */
+} FCPI_FIELDS;
+
+/* IOCB Command template for all FCP Target commands */
+typedef struct {
+	struct ulp_bde fcpt_Buffer[2];	/* FCP_CMND payload descriptor */
+	uint32_t fcpt_Offset;
+	uint32_t fcpt_Length;	/* transfer ready for IWRITE */
+} FCPT_FIELDS;
+
+/* SLI-2 IOCB structure definitions */
+
+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} XMT_SEQ_FIELDS64;
+
+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
+typedef struct {
+	struct ulp_bde64 rcvBde;
+	uint32_t rsvd1;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} RCV_SEQ_FIELDS64;
+
+/* IOCB Command template for ELS_REQUEST64 */
+typedef struct {
+	ULP_BDL bdl;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word4Rsvd:7;
+	uint32_t fl:1;
+	uint32_t myID:24;
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t myID:24;
+	uint32_t fl:1;
+	uint32_t word4Rsvd:7;
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} ELS_REQUEST64;
+
+/* IOCB Command template for GEN_REQUEST64 */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t xrsqRo;	/* Starting Relative Offset */
+	WORD5 w5;		/* Header control/status word */
+} GEN_REQUEST64;
+
+/* IOCB Command template for RCV_ELS_REQ64 */
+typedef struct {
+	struct ulp_bde64 elsReq;
+	uint32_t rcvd1;
+	uint32_t parmRo;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t word5Rsvd:8;
+	uint32_t remoteID:24;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t remoteID:24;
+	uint32_t word5Rsvd:8;
+#endif
+} RCV_ELS_REQ64;
+
+/* IOCB Command template for RCV_SEQ64 */
+struct rcv_seq64 {
+	struct ulp_bde64 elsReq;
+	uint32_t hbq_1;
+	uint32_t parmRo;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t rctl:8;
+	uint32_t type:8;
+	uint32_t dfctl:8;
+	uint32_t ls:1;
+	uint32_t fs:1;
+	uint32_t rsvd2:3;
+	uint32_t si:1;
+	uint32_t bc:1;
+	uint32_t rsvd3:1;
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t rsvd3:1;
+	uint32_t bc:1;
+	uint32_t si:1;
+	uint32_t rsvd2:3;
+	uint32_t fs:1;
+	uint32_t ls:1;
+	uint32_t dfctl:8;
+	uint32_t type:8;
+	uint32_t rctl:8;
+#endif
+};
+
+/* IOCB Command template for all 64 bit FCP Initiator commands */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t fcpi_parm;
+	uint32_t fcpi_XRdy;	/* transfer ready for IWRITE */
+} FCPI_FIELDS64;
+
+/* IOCB Command template for all 64 bit FCP Target commands */
+typedef struct {
+	ULP_BDL bdl;
+	uint32_t fcpt_Offset;
+	uint32_t fcpt_Length;	/* transfer ready for IWRITE */
+} FCPT_FIELDS64;
+
+/* IOCB Command template for Async Status iocb commands */
+typedef struct {
+	uint32_t rsvd[4];
+	uint32_t param;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t evt_code;		/* High order bits word 5 */
+	uint16_t sub_ctxt_tag;		/* Low  order bits word 5 */
+#else   /*  __LITTLE_ENDIAN_BITFIELD */
+	uint16_t sub_ctxt_tag;		/* Low  order bits word 5 */
+	uint16_t evt_code;		/* High order bits word 5 */
+#endif
+} ASYNCSTAT_FIELDS;
+#define ASYNC_TEMP_WARN		0x100
+#define ASYNC_TEMP_SAFE		0x101
+#define ASYNC_STATUS_CN		0x102
+
+/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
+   or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
+
+struct rcv_sli3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint16_t ox_id;
+	uint16_t seq_cnt;
+
+	uint16_t vpi;
+	uint16_t word9Rsvd;
+#else  /*  __LITTLE_ENDIAN */
+	uint16_t seq_cnt;
+	uint16_t ox_id;
+
+	uint16_t word9Rsvd;
+	uint16_t vpi;
+#endif
+	uint32_t word10Rsvd;
+	uint32_t acc_len;      /* accumulated length */
+	struct ulp_bde64 bde2;
+};
+
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+	struct ulp_bde64 bde;
+	uint32_t buffer_tag;
+};
+
+/* IOCB Command template for QUE_XRI64_CX (0xB3) command */
+typedef struct {
+	struct lpfc_hbq_entry   buff;
+	uint32_t                rsvd;
+	uint32_t		rsvd1;
+} QUE_XRI64_CX_FIELDS;
+
+struct que_xri64cx_ext_fields {
+	uint32_t	iotag64_low;
+	uint32_t	iotag64_high;
+	uint32_t	ebde_count;
+	uint32_t	rsvd;
+	struct lpfc_hbq_entry	buff[5];
+};
+
+struct sli3_bg_fields {
+	uint32_t filler[6];	/* word 8-13 in IOCB */
+	uint32_t bghm;		/* word 14 - BlockGuard High Water Mark */
+/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
+#define BGS_BIDIR_BG_PROF_MASK		0xff000000
+#define BGS_BIDIR_BG_PROF_SHIFT		24
+#define BGS_BIDIR_ERR_COND_FLAGS_MASK	0x003f0000
+#define BGS_BIDIR_ERR_COND_SHIFT	16
+#define BGS_BG_PROFILE_MASK		0x0000ff00
+#define BGS_BG_PROFILE_SHIFT		8
+#define BGS_INVALID_PROF_MASK		0x00000020
+#define BGS_INVALID_PROF_SHIFT		5
+#define BGS_UNINIT_DIF_BLOCK_MASK	0x00000010
+#define BGS_UNINIT_DIF_BLOCK_SHIFT	4
+#define BGS_HI_WATER_MARK_PRESENT_MASK	0x00000008
+#define BGS_HI_WATER_MARK_PRESENT_SHIFT	3
+#define BGS_REFTAG_ERR_MASK		0x00000004
+#define BGS_REFTAG_ERR_SHIFT		2
+#define BGS_APPTAG_ERR_MASK		0x00000002
+#define BGS_APPTAG_ERR_SHIFT		1
+#define BGS_GUARD_ERR_MASK		0x00000001
+#define BGS_GUARD_ERR_SHIFT		0
+	uint32_t bgstat;	/* word 15 - BlockGuard Status */
+};
+
+static inline uint32_t
+lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_BIDIR_BG_PROF_MASK) >>
+				BGS_BIDIR_BG_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
+{
+	return (bgstat & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+				BGS_BIDIR_ERR_COND_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bg_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_BG_PROFILE_MASK) >>
+				BGS_BG_PROFILE_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_invalid_prof(uint32_t bgstat)
+{
+	return (bgstat & BGS_INVALID_PROF_MASK) >>
+				BGS_INVALID_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
+{
+	return (bgstat & BGS_UNINIT_DIF_BLOCK_MASK) >>
+				BGS_UNINIT_DIF_BLOCK_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
+{
+	return (bgstat & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+				BGS_HI_WATER_MARK_PRESENT_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_reftag_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_REFTAG_ERR_MASK) >>
+				BGS_REFTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_apptag_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_APPTAG_ERR_MASK) >>
+				BGS_APPTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_guard_err(uint32_t bgstat)
+{
+	return (bgstat & BGS_GUARD_ERR_MASK) >>
+				BGS_GUARD_ERR_SHIFT;
+}
+
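+/* Illustrative sketch (not part of the driver): a caller holding the bgstat
+ * word of an IOCB could combine the accessors above to test for any
+ * BlockGuard check failure, e.g.:
+ *
+ *	if (lpfc_bgs_get_guard_err(bgstat) ||
+ *	    lpfc_bgs_get_apptag_err(bgstat) ||
+ *	    lpfc_bgs_get_reftag_err(bgstat))
+ *		handle_bg_error();	(hypothetical error handler)
+ */
+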
+#define LPFC_EXT_DATA_BDE_COUNT 3
+struct fcp_irw_ext {
+	uint32_t	io_tag64_low;
+	uint32_t	io_tag64_high;
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint8_t		reserved1;
+	uint8_t		reserved2;
+	uint8_t		reserved3;
+	uint8_t		ebde_count;
+#else  /* __LITTLE_ENDIAN */
+	uint8_t		ebde_count;
+	uint8_t		reserved3;
+	uint8_t		reserved2;
+	uint8_t		reserved1;
+#endif
+	uint32_t	reserved4;
+	struct ulp_bde64 rbde;		/* response bde */
+	struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT];	/* data BDE or BPL */
+	uint8_t icd[32];		/* immediate command data (32 bytes) */
+};
+
+typedef struct _IOCB {	/* IOCB structure */
+	union {
+		GENERIC_RSP grsp;	/* Generic response */
+		XR_SEQ_FIELDS xrseq;	/* XMIT / BCAST / RCV_SEQUENCE cmd */
+		struct ulp_bde cont[3];	/* up to 3 continuation bdes */
+		RCV_ELS_REQ rcvels;	/* RCV_ELS_REQ template */
+		AC_XRI acxri;	/* ABORT / CLOSE_XRI template */
+		A_MXRI64 amxri;	/* abort multiple xri command overlay */
+		GET_RPI getrpi;	/* GET_RPI template */
+		FCPI_FIELDS fcpi;	/* FCP Initiator template */
+		FCPT_FIELDS fcpt;	/* FCP target template */
+
+		/* SLI-2 structures */
+
+		struct ulp_bde64 cont64[2];  /* up to 2 64 bit continuation
+					      * bde_64s */
+		ELS_REQUEST64 elsreq64;	/* ELS_REQUEST template */
+		GEN_REQUEST64 genreq64;	/* GEN_REQUEST template */
+		RCV_ELS_REQ64 rcvels64;	/* RCV_ELS_REQ template */
+		XMT_SEQ_FIELDS64 xseq64;	/* XMIT / BCAST cmd */
+		FCPI_FIELDS64 fcpi64;	/* FCP 64 bit Initiator template */
+		FCPT_FIELDS64 fcpt64;	/* FCP 64 bit target template */
+		ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */
+		QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */
+		struct rcv_seq64 rcvseq64;	/* RCV_SEQ64 and RCV_CONT64 */
+		struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */
+		uint32_t ulpWord[IOCB_WORD_SZ - 2];	/* generic 6 'words' */
+	} un;
+	union {
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint16_t ulpContext;	/* High order bits word 6 */
+			uint16_t ulpIoTag;	/* Low  order bits word 6 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t ulpIoTag;	/* Low  order bits word 6 */
+			uint16_t ulpContext;	/* High order bits word 6 */
+#endif
+		} t1;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint16_t ulpContext;	/* High order bits word 6 */
+			uint16_t ulpIoTag1:2;	/* Low  order bits word 6 */
+			uint16_t ulpIoTag0:14;	/* Low  order bits word 6 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint16_t ulpIoTag0:14;	/* Low  order bits word 6 */
+			uint16_t ulpIoTag1:2;	/* Low  order bits word 6 */
+			uint16_t ulpContext;	/* High order bits word 6 */
+#endif
+		} t2;
+	} un1;
+#define ulpContext un1.t1.ulpContext
+#define ulpIoTag   un1.t1.ulpIoTag
+#define ulpIoTag0  un1.t2.ulpIoTag0
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	uint32_t ulpTimeout:8;
+	uint32_t ulpXS:1;
+	uint32_t ulpFCP2Rcvy:1;
+	uint32_t ulpPU:2;
+	uint32_t ulpIr:1;
+	uint32_t ulpClass:3;
+	uint32_t ulpCommand:8;
+	uint32_t ulpStatus:4;
+	uint32_t ulpBdeCount:2;
+	uint32_t ulpLe:1;
+	uint32_t ulpOwner:1;	/* Low order bit word 7 */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+	uint32_t ulpOwner:1;	/* Low order bit word 7 */
+	uint32_t ulpLe:1;
+	uint32_t ulpBdeCount:2;
+	uint32_t ulpStatus:4;
+	uint32_t ulpCommand:8;
+	uint32_t ulpClass:3;
+	uint32_t ulpIr:1;
+	uint32_t ulpPU:2;
+	uint32_t ulpFCP2Rcvy:1;
+	uint32_t ulpXS:1;
+	uint32_t ulpTimeout:8;
+#endif
+
+	union {
+		struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+
+		/* words 8-31 used for que_xri_cx iocb */
+		struct que_xri64cx_ext_fields que_xri64cx_ext_words;
+		struct fcp_irw_ext fcp_ext;
+		uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+
+		/* words 8-15 for BlockGuard */
+		struct sli3_bg_fields sli3_bg;
+	} unsli3;
+
+#define ulpCt_h ulpXS
+#define ulpCt_l ulpFCP2Rcvy
+
+#define IOCB_FCP	   1	/* IOCB is used for FCP ELS cmds-ulpRsvByte */
+#define IOCB_IP		   2	/* IOCB is used for IP ELS cmds */
+#define PARM_UNUSED        0	/* PU field (Word 4) not used */
+#define PARM_REL_OFF       1	/* PU field (Word 4) = R. O. */
+#define PARM_READ_CHECK    2	/* PU field (Word 4) = Data Transfer Length */
+#define PARM_NPIV_DID	   3
+#define CLASS1             0	/* Class 1 */
+#define CLASS2             1	/* Class 2 */
+#define CLASS3             2	/* Class 3 */
+#define CLASS_FCP_INTERMIX 7	/* FCP Data->Cls 1, all else->Cls 2 */
+
+#define IOSTAT_SUCCESS         0x0	/* ulpStatus  - HBA defined */
+#define IOSTAT_FCP_RSP_ERROR   0x1
+#define IOSTAT_REMOTE_STOP     0x2
+#define IOSTAT_LOCAL_REJECT    0x3
+#define IOSTAT_NPORT_RJT       0x4
+#define IOSTAT_FABRIC_RJT      0x5
+#define IOSTAT_NPORT_BSY       0x6
+#define IOSTAT_FABRIC_BSY      0x7
+#define IOSTAT_INTERMED_RSP    0x8
+#define IOSTAT_LS_RJT          0x9
+#define IOSTAT_BA_RJT          0xA
+#define IOSTAT_RSVD1           0xB
+#define IOSTAT_RSVD2           0xC
+#define IOSTAT_RSVD3           0xD
+#define IOSTAT_RSVD4           0xE
+#define IOSTAT_NEED_BUFFER     0xF
+#define IOSTAT_DRIVER_REJECT   0x10   /* ulpStatus  - Driver defined */
+#define IOSTAT_DEFAULT         0xF    /* Same as rsvd5 for now */
+#define IOSTAT_CNT             0x11
+
+} IOCB_t;
+
+
+#define SLI1_SLIM_SIZE   (4 * 1024)
+
+/* Up to 498 IOCBs will fit into 16k
+ * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
+ */
+#define SLI2_SLIM_SIZE   (64 * 1024)
+
+/* Maximum IOCBs that will fit in SLI2 slim */
+#define MAX_SLI2_IOCB    498
+#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+			    (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
+			    sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
+
+/* HBQ entries are 4 words each = 4k */
+#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) *  \
+			     lpfc_sli_hbq_count())
+
+struct lpfc_sli2_slim {
+	MAILBOX_t mbx;
+	uint32_t  mbx_ext_words[MAILBOX_EXT_WSIZE];
+	PCB_t pcb;
+	IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
+};
+
+/*
+ * This function checks PCI device to allow special handling for LC HBAs.
+ *
+ * Parameters:
+ * device : struct pci_dev's device field
+ *
+ * return 1 => TRUE
+ *        0 => FALSE
+ */
+static inline int
+lpfc_is_LC_HBA(unsigned short device)
+{
+	if ((device == PCI_DEVICE_ID_TFLY) ||
+	    (device == PCI_DEVICE_ID_PFLY) ||
+	    (device == PCI_DEVICE_ID_LP101) ||
+	    (device == PCI_DEVICE_ID_BMID) ||
+	    (device == PCI_DEVICE_ID_BSMB) ||
+	    (device == PCI_DEVICE_ID_ZMID) ||
+	    (device == PCI_DEVICE_ID_ZSMB) ||
+	    (device == PCI_DEVICE_ID_SAT_MID) ||
+	    (device == PCI_DEVICE_ID_SAT_SMB) ||
+	    (device == PCI_DEVICE_ID_RFLY))
+		return 1;
+	else
+		return 0;
+}
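+
+/* Illustrative sketch (not part of the driver): a PCI probe path holding a
+ * struct pci_dev *pdev (hypothetical here) could use the helper above as
+ *
+ *	if (lpfc_is_LC_HBA(pdev->device))
+ *		...apply the LC HBA special handling...
+ */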
+
+/*
+ * Determine if an IOCB failed because of a link event or firmware reset.
+ */
+
+static inline int
+lpfc_error_lost_link(IOCB_t *iocbp)
+{
+	return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+		(iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
+		 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
+		 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+}
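+
+/* Illustrative sketch (not part of the driver): an IOCB completion path
+ * could use the helper above to avoid retrying a command that failed only
+ * because the link went away, e.g.:
+ *
+ *	if (lpfc_error_lost_link(iocbp))
+ *		return;		(hypothetical: skip retry, wait for rediscovery)
+ */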
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw4.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 0000000..91f0976
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,3505 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ *	struct temp {
+ *		uint32_t	field1;
+ *		uint32_t	field2;
+ *		uint32_t	field3;
+ *		uint32_t	field4;
+ *	#define example_bit_field_SHIFT		7
+ *	#define example_bit_field_MASK		0x03
+ *	#define example_bit_field_WORD		field4
+ *		uint32_t	field5;
+ *	};
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ *	struct temp t1;
+ *	value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *	bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *	bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get_be32(name, ptr) \
+	((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get_le32(name, ptr) \
+	((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
+#define bf_get(name, ptr) \
+	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set_le32(name, ptr, value) \
+	((ptr)->name##_WORD = cpu_to_le32(((((value) & \
+	name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \
+	~(name##_MASK << name##_SHIFT)))))
+#define bf_set(name, ptr, value) \
+	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+
+struct dma_address {
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+};
+
+struct lpfc_sli_intf {
+	uint32_t word0;
+#define lpfc_sli_intf_valid_SHIFT		29
+#define lpfc_sli_intf_valid_MASK		0x00000007
+#define lpfc_sli_intf_valid_WORD		word0
+#define LPFC_SLI_INTF_VALID		6
+#define lpfc_sli_intf_sli_hint2_SHIFT		24
+#define lpfc_sli_intf_sli_hint2_MASK		0x0000001F
+#define lpfc_sli_intf_sli_hint2_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT2_NONE	0
+#define lpfc_sli_intf_sli_hint1_SHIFT		16
+#define lpfc_sli_intf_sli_hint1_MASK		0x000000FF
+#define lpfc_sli_intf_sli_hint1_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT1_NONE	0
+#define LPFC_SLI_INTF_SLI_HINT1_1	1
+#define LPFC_SLI_INTF_SLI_HINT1_2	2
+#define lpfc_sli_intf_if_type_SHIFT		12
+#define lpfc_sli_intf_if_type_MASK		0x0000000F
+#define lpfc_sli_intf_if_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_0		0
+#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define LPFC_SLI_INTF_IF_TYPE_2		2
+#define lpfc_sli_intf_sli_family_SHIFT		8
+#define lpfc_sli_intf_sli_family_MASK		0x0000000F
+#define lpfc_sli_intf_sli_family_WORD		word0
+#define LPFC_SLI_INTF_FAMILY_BE2	0x0
+#define LPFC_SLI_INTF_FAMILY_BE3	0x1
+#define LPFC_SLI_INTF_FAMILY_LNCR_A0	0xa
+#define LPFC_SLI_INTF_FAMILY_LNCR_B0	0xb
+#define lpfc_sli_intf_slirev_SHIFT		4
+#define lpfc_sli_intf_slirev_MASK		0x0000000F
+#define lpfc_sli_intf_slirev_WORD		word0
+#define LPFC_SLI_INTF_REV_SLI3		3
+#define LPFC_SLI_INTF_REV_SLI4		4
+#define lpfc_sli_intf_func_type_SHIFT		0
+#define lpfc_sli_intf_func_type_MASK		0x00000001
+#define lpfc_sli_intf_func_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_PHYS	0
+#define LPFC_SLI_INTF_IF_TYPE_VIRT	1
+};
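+
+/* Illustrative sketch (not part of the driver): once word0 has been read
+ * from the port, the bf_get() macro above decodes its fields, e.g.:
+ *
+ *	struct lpfc_sli_intf intf;
+ *	intf.word0 = readl(addr);	(addr: hypothetical ioremapped register)
+ *	if (bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID &&
+ *	    bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)
+ *		...the port reports a valid SLI-4 interface...
+ */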
+
+#define LPFC_SLI4_MBX_EMBED	true
+#define LPFC_SLI4_MBX_NEMBED	false
+
+#define LPFC_SLI4_MB_WORD_COUNT		64
+#define LPFC_MAX_MQ_PAGE		8
+#define LPFC_MAX_WQ_PAGE		8
+#define LPFC_MAX_CQ_PAGE		4
+#define LPFC_MAX_EQ_PAGE		8
+
+#define LPFC_VIR_FUNC_MAX       32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX        5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE	0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE	16
+#define LPFC_ALIGN_64_BYTE	64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET	256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE	0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI	0x2
+#define LPFC_BMBX_BIT1_ADDR_LO	0
+#define LPFC_RPI_HDR_COUNT	64
+#define LPFC_HDR_TEMPLATE_SIZE	4096
+#define LPFC_RPI_ALLOC_ERROR 	0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT	132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX	 0
+
+/* Virtual function numbers */
+#define LPFC_VF0		0
+#define LPFC_VF1		1
+#define LPFC_VF2		2
+#define LPFC_VF3		3
+#define LPFC_VF4		4
+#define LPFC_VF5		5
+#define LPFC_VF6		6
+#define LPFC_VF7		7
+#define LPFC_VF8		8
+#define LPFC_VF9		9
+#define LPFC_VF10		10
+#define LPFC_VF11		11
+#define LPFC_VF12		12
+#define LPFC_VF13		13
+#define LPFC_VF14		14
+#define LPFC_VF15		15
+#define LPFC_VF16		16
+#define LPFC_VF17		17
+#define LPFC_VF18		18
+#define LPFC_VF19		19
+#define LPFC_VF20		20
+#define LPFC_VF21		21
+#define LPFC_VF22		22
+#define LPFC_VF23		23
+#define LPFC_VF24		24
+#define LPFC_VF25		25
+#define LPFC_VF26		26
+#define LPFC_VF27		27
+#define LPFC_VF28		28
+#define LPFC_VF29		29
+#define LPFC_VF30		30
+#define LPFC_VF31		31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0		0
+#define LPFC_PCI_FUNC1		1
+#define LPFC_PCI_FUNC2		2
+#define LPFC_PCI_FUNC3		3
+#define LPFC_PCI_FUNC4		4
+
+/* SLI4 interface type-2 PDEV_CTL register */
+#define LPFC_CTL_PDEV_CTL_OFFSET	0x414
+#define LPFC_CTL_PDEV_CTL_DRST		0x00000001
+#define LPFC_CTL_PDEV_CTL_FRST		0x00000002
+#define LPFC_CTL_PDEV_CTL_DD		0x00000004
+#define LPFC_CTL_PDEV_CTL_LC		0x00000008
+#define LPFC_CTL_PDEV_CTL_FRL_ALL	0x00
+#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE	0x10
+#define LPFC_CTL_PDEV_CTL_FRL_NIC	0x20
+
+#define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT	4
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST       651042
+#define LPFC_MIM_IMAX          636
+#define LPFC_FP_DEF_IMAX       10000
+#define LPFC_SP_DEF_IMAX       10000
+
+/* PORT_CAPABILITIES constants. */
+#define LPFC_MAX_SUPPORTED_PAGES	8
+
+struct ulp_bde64 {
+	union ULP_BDE_TUS {
+		uint32_t w;
+		struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+#else	/*  __LITTLE_ENDIAN_BITFIELD */
+			uint32_t bdeSize:24;	/* Size of buffer (in bytes) */
+			uint32_t bdeFlags:8;	/* BDE Flags 0 IS A SUPPORTED
+						   VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64    0x00	/* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01	/* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P   0x02	/* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I   0x08	/* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP  0x0A	/* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64    0x40	/* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P   0x42	/* BLP (Port-resident) */
+		} f;
+	} tus;
+	uint32_t addrLow;
+	uint32_t addrHigh;
+};
+
+struct lpfc_sli4_flags {
+	uint32_t word0;
+#define lpfc_idx_rsrc_rdy_SHIFT		0
+#define lpfc_idx_rsrc_rdy_MASK		0x00000001
+#define lpfc_idx_rsrc_rdy_WORD		word0
+#define LPFC_IDX_RSRC_RDY		1
+#define lpfc_xri_rsrc_rdy_SHIFT		1
+#define lpfc_xri_rsrc_rdy_MASK		0x00000001
+#define lpfc_xri_rsrc_rdy_WORD		word0
+#define LPFC_XRI_RSRC_RDY		1
+#define lpfc_rpi_rsrc_rdy_SHIFT		2
+#define lpfc_rpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_rpi_rsrc_rdy_WORD		word0
+#define LPFC_RPI_RSRC_RDY		1
+#define lpfc_vpi_rsrc_rdy_SHIFT		3
+#define lpfc_vpi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vpi_rsrc_rdy_WORD		word0
+#define LPFC_VPI_RSRC_RDY		1
+#define lpfc_vfi_rsrc_rdy_SHIFT		4
+#define lpfc_vfi_rsrc_rdy_MASK		0x00000001
+#define lpfc_vfi_rsrc_rdy_WORD		word0
+#define LPFC_VFI_RSRC_RDY		1
+};
+
+struct sli4_bls_rsp {
+	uint32_t word0_rsvd;      /* Word0 must be reserved */
+	uint32_t word1;
+#define lpfc_abts_orig_SHIFT      0
+#define lpfc_abts_orig_MASK       0x00000001
+#define lpfc_abts_orig_WORD       word1
+#define LPFC_ABTS_UNSOL_RSP       1
+#define LPFC_ABTS_UNSOL_INT       0
+	uint32_t word2;
+#define lpfc_abts_rxid_SHIFT      0
+#define lpfc_abts_rxid_MASK       0x0000FFFF
+#define lpfc_abts_rxid_WORD       word2
+#define lpfc_abts_oxid_SHIFT      16
+#define lpfc_abts_oxid_MASK       0x0000FFFF
+#define lpfc_abts_oxid_WORD       word2
+	uint32_t word3;
+#define lpfc_vndr_code_SHIFT	0
+#define lpfc_vndr_code_MASK	0x000000FF
+#define lpfc_vndr_code_WORD	word3
+#define lpfc_rsn_expln_SHIFT	8
+#define lpfc_rsn_expln_MASK	0x000000FF
+#define lpfc_rsn_expln_WORD	word3
+#define lpfc_rsn_code_SHIFT	16
+#define lpfc_rsn_code_MASK	0x000000FF
+#define lpfc_rsn_code_WORD	word3
+
+	uint32_t word4;
+	uint32_t word5_rsvd;	/* Word5 must be reserved */
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+	uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT	16
+#define lpfc_eqe_resource_id_MASK	0x000000FF
+#define lpfc_eqe_resource_id_WORD	word0
+#define lpfc_eqe_minor_code_SHIFT	4
+#define lpfc_eqe_minor_code_MASK	0x00000FFF
+#define lpfc_eqe_minor_code_WORD	word0
+#define lpfc_eqe_major_code_SHIFT	1
+#define lpfc_eqe_major_code_MASK	0x00000007
+#define lpfc_eqe_major_code_WORD	word0
+#define lpfc_eqe_valid_SHIFT		0
+#define lpfc_eqe_valid_MASK		0x00000001
+#define lpfc_eqe_valid_WORD		word0
+};
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t word3;
+#define lpfc_cqe_valid_SHIFT		31
+#define lpfc_cqe_valid_MASK		0x00000001
+#define lpfc_cqe_valid_WORD		word3
+#define lpfc_cqe_code_SHIFT		16
+#define lpfc_cqe_code_MASK		0x000000FF
+#define lpfc_cqe_code_WORD		word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS		0x0
+#define CQE_STATUS_FCP_RSP_FAILURE	0x1
+#define CQE_STATUS_REMOTE_STOP		0x2
+#define CQE_STATUS_LOCAL_REJECT		0x3
+#define CQE_STATUS_NPORT_RJT		0x4
+#define CQE_STATUS_FABRIC_RJT		0x5
+#define CQE_STATUS_NPORT_BSY		0x6
+#define CQE_STATUS_FABRIC_BSY		0x7
+#define CQE_STATUS_INTERMED_RSP		0x8
+#define CQE_STATUS_LS_RJT		0x9
+#define CQE_STATUS_CMD_REJECT		0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK	0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY	0xf
+#define CQE_STATUS_DI_ERROR		0x16
+
+/* Used when mapping CQE status to IOCB */
+#define LPFC_IOCB_STATUS_MASK		0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR		0x0
+#define CQE_HW_STATUS_UNDERRUN		0x1
+#define CQE_HW_STATUS_OVERRUN		0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE		0x1
+#define CQE_CODE_RELEASE_WQE		0x2
+#define CQE_CODE_RECEIVE		0x4
+#define CQE_CODE_XRI_ABORTED		0x5
+#define CQE_CODE_RECEIVE_V1		0x9
+
+/*
+ * Define mask value for xri_aborted and wcqe completed CQE extended status.
+ * Currently, extended status is limited to 9 bits (0x0 -> 0x103).
+ */
+#define WCQE_PARAM_MASK		0x1FF
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+	uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT	16
+#define lpfc_wcqe_c_request_tag_MASK	0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD	word0
+#define lpfc_wcqe_c_status_SHIFT	8
+#define lpfc_wcqe_c_status_MASK		0x000000FF
+#define lpfc_wcqe_c_status_WORD		word0
+#define lpfc_wcqe_c_hw_status_SHIFT	0
+#define lpfc_wcqe_c_hw_status_MASK	0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD	word0
+	uint32_t total_data_placed;
+	uint32_t parameter;
+#define lpfc_wcqe_c_bg_edir_SHIFT	5
+#define lpfc_wcqe_c_bg_edir_MASK	0x00000001
+#define lpfc_wcqe_c_bg_edir_WORD	parameter
+#define lpfc_wcqe_c_bg_tdpv_SHIFT	3
+#define lpfc_wcqe_c_bg_tdpv_MASK	0x00000001
+#define lpfc_wcqe_c_bg_tdpv_WORD	parameter
+#define lpfc_wcqe_c_bg_re_SHIFT		2
+#define lpfc_wcqe_c_bg_re_MASK		0x00000001
+#define lpfc_wcqe_c_bg_re_WORD		parameter
+#define lpfc_wcqe_c_bg_ae_SHIFT		1
+#define lpfc_wcqe_c_bg_ae_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ae_WORD		parameter
+#define lpfc_wcqe_c_bg_ge_SHIFT		0
+#define lpfc_wcqe_c_bg_ge_MASK		0x00000001
+#define lpfc_wcqe_c_bg_ge_WORD		parameter
+	uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT		28
+#define lpfc_wcqe_c_xb_MASK		0x00000001
+#define lpfc_wcqe_c_xb_WORD		word3
+#define lpfc_wcqe_c_pv_SHIFT		27
+#define lpfc_wcqe_c_pv_MASK		0x00000001
+#define lpfc_wcqe_c_pv_WORD		word3
+#define lpfc_wcqe_c_priority_SHIFT	24
+#define lpfc_wcqe_c_priority_MASK	0x00000007
+#define lpfc_wcqe_c_priority_WORD	word3
+#define lpfc_wcqe_c_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD		lpfc_cqe_code_WORD
+};
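+
+/* Illustrative sketch (not part of the driver): the per-field macros above
+ * are meant to be used with bf_get(), e.g. to pull the request tag and the
+ * completion status out of a received WCQE:
+ *
+ *	tag    = bf_get(lpfc_wcqe_c_request_tag, wcqe);
+ *	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ *
+ * where wcqe points to a struct lpfc_wcqe_complete and LPFC_IOCB_STATUS_MASK
+ * (defined above) maps the status into the IOCB status range.
+ */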
+
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT		16
+#define lpfc_wcqe_r_wq_id_MASK		0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD		word2
+#define lpfc_wcqe_r_wqe_index_SHIFT	0
+#define lpfc_wcqe_r_wqe_index_MASK	0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD	word2
+	uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD		lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+	uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT		8
+#define lpfc_wcqe_xa_status_MASK		0x000000FF
+#define lpfc_wcqe_xa_status_WORD		word0
+	uint32_t parameter;
+	uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT	16
+#define lpfc_wcqe_xa_remote_xid_MASK	0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD	word2
+#define lpfc_wcqe_xa_xri_SHIFT		0
+#define lpfc_wcqe_xa_xri_MASK		0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD		word2
+	uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT	lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT		30
+#define lpfc_wcqe_xa_ia_MASK		0x00000001
+#define lpfc_wcqe_xa_ia_WORD		word3
+#define CQE_XRI_ABORTED_IA_REMOTE	0
+#define CQE_XRI_ABORTED_IA_LOCAL	1
+#define lpfc_wcqe_xa_br_SHIFT		29
+#define lpfc_wcqe_xa_br_MASK		0x00000001
+#define lpfc_wcqe_xa_br_WORD		word3
+#define CQE_XRI_ABORTED_BR_BA_ACC	0
+#define CQE_XRI_ABORTED_BR_BA_RJT	1
+#define lpfc_wcqe_xa_eo_SHIFT		28
+#define lpfc_wcqe_xa_eo_MASK		0x00000001
+#define lpfc_wcqe_xa_eo_WORD		word3
+#define CQE_XRI_ABORTED_EO_REMOTE	0
+#define CQE_XRI_ABORTED_EO_LOCAL	1
+#define lpfc_wcqe_xa_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD		lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+	uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT		16
+#define lpfc_rcqe_bindex_MASK		0x0000FFF
+#define lpfc_rcqe_bindex_WORD		word0
+#define lpfc_rcqe_status_SHIFT		8
+#define lpfc_rcqe_status_MASK		0x000000FF
+#define lpfc_rcqe_status_WORD		word0
+#define FC_STATUS_RQ_SUCCESS		0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 	0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF 	0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC 	0x13 /* Frame Discard */
+	uint32_t word1;
+#define lpfc_rcqe_fcf_id_v1_SHIFT	0
+#define lpfc_rcqe_fcf_id_v1_MASK	0x0000003F
+#define lpfc_rcqe_fcf_id_v1_WORD	word1
+	uint32_t word2;
+#define lpfc_rcqe_length_SHIFT		16
+#define lpfc_rcqe_length_MASK		0x0000FFFF
+#define lpfc_rcqe_length_WORD		word2
+#define lpfc_rcqe_rq_id_SHIFT		6
+#define lpfc_rcqe_rq_id_MASK		0x000003FF
+#define lpfc_rcqe_rq_id_WORD		word2
+#define lpfc_rcqe_fcf_id_SHIFT		0
+#define lpfc_rcqe_fcf_id_MASK		0x0000003F
+#define lpfc_rcqe_fcf_id_WORD		word2
+#define lpfc_rcqe_rq_id_v1_SHIFT	0
+#define lpfc_rcqe_rq_id_v1_MASK		0x0000FFFF
+#define lpfc_rcqe_rq_id_v1_WORD		word2
+	uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT		lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK		lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD		lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT		30
+#define lpfc_rcqe_port_MASK		0x00000001
+#define lpfc_rcqe_port_WORD		word3
+#define lpfc_rcqe_hdr_length_SHIFT	24
+#define lpfc_rcqe_hdr_length_MASK	0x0000001F
+#define lpfc_rcqe_hdr_length_WORD	word3
+#define lpfc_rcqe_code_SHIFT		lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK		lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD		lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT		8
+#define lpfc_rcqe_eof_MASK		0x000000FF
+#define lpfc_rcqe_eof_WORD		word3
+#define FCOE_EOFn	0x41
+#define FCOE_EOFt	0x42
+#define FCOE_EOFni	0x49
+#define FCOE_EOFa	0x50
+#define lpfc_rcqe_sof_SHIFT		0
+#define lpfc_rcqe_sof_MASK		0x000000FF
+#define lpfc_rcqe_sof_WORD		word3
+#define FCOE_SOFi2	0x2d
+#define FCOE_SOFi3	0x2e
+#define FCOE_SOFn2	0x35
+#define FCOE_SOFn3	0x36
+};
+
+struct lpfc_rqe {
+	uint32_t address_hi;
+	uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+	uint32_t word2;
+#define lpfc_bde4_last_SHIFT		31
+#define lpfc_bde4_last_MASK		0x00000001
+#define lpfc_bde4_last_WORD		word2
+#define lpfc_bde4_sge_offset_SHIFT	0
+#define lpfc_bde4_sge_offset_MASK	0x000003FF
+#define lpfc_bde4_sge_offset_WORD	word2
+	uint32_t word3;
+#define lpfc_bde4_length_SHIFT		0
+#define lpfc_bde4_length_MASK		0x000000FF
+#define lpfc_bde4_length_WORD		word3
+};
+
+struct lpfc_register {
+	uint32_t word0;
+};
+
+/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
+#define LPFC_UERR_STATUS_HI		0x00A4
+#define LPFC_UERR_STATUS_LO		0x00A0
+#define LPFC_UE_MASK_HI			0x00AC
+#define LPFC_UE_MASK_LO			0x00A8
+
+/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
+#define LPFC_SLI_INTF			0x0058
+
+#define LPFC_CTL_PORT_SEM_OFFSET	0x400
+#define lpfc_port_smphr_perr_SHIFT	31
+#define lpfc_port_smphr_perr_MASK	0x1
+#define lpfc_port_smphr_perr_WORD	word0
+#define lpfc_port_smphr_sfi_SHIFT	30
+#define lpfc_port_smphr_sfi_MASK	0x1
+#define lpfc_port_smphr_sfi_WORD	word0
+#define lpfc_port_smphr_nip_SHIFT	29
+#define lpfc_port_smphr_nip_MASK	0x1
+#define lpfc_port_smphr_nip_WORD	word0
+#define lpfc_port_smphr_ipc_SHIFT	28
+#define lpfc_port_smphr_ipc_MASK	0x1
+#define lpfc_port_smphr_ipc_WORD	word0
+#define lpfc_port_smphr_scr1_SHIFT	27
+#define lpfc_port_smphr_scr1_MASK	0x1
+#define lpfc_port_smphr_scr1_WORD	word0
+#define lpfc_port_smphr_scr2_SHIFT	26
+#define lpfc_port_smphr_scr2_MASK	0x1
+#define lpfc_port_smphr_scr2_WORD	word0
+#define lpfc_port_smphr_host_scratch_SHIFT	16
+#define lpfc_port_smphr_host_scratch_MASK	0xFF
+#define lpfc_port_smphr_host_scratch_WORD	word0
+#define lpfc_port_smphr_port_status_SHIFT	0
+#define lpfc_port_smphr_port_status_MASK	0xFFFF
+#define lpfc_port_smphr_port_status_WORD	word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
+#define LPFC_POST_STAGE_HOST_RDY			0x0002
+#define LPFC_POST_STAGE_BE_RESET			0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START		0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE			0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START		0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE			0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START		0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE		0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START			0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE			0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START		0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE		0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START		0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE		0x0701
+#define LPFC_POST_STAGE_ARMFW_START			0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START		0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE			0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START	0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE	0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET			0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK			0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE		0x0B02
+#define LPFC_POST_STAGE_PERFROM_TFTP			0x0B03
+#define LPFC_POST_STAGE_PARSE_XML			0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE			0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE			0x0B06
+#define LPFC_POST_STAGE_RC_DONE				0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
+#define LPFC_POST_STAGE_PORT_READY			0xC000
+#define LPFC_POST_STAGE_PORT_UE 			0xF000
+
+#define LPFC_CTL_PORT_STA_OFFSET	0x404
+#define lpfc_sliport_status_err_SHIFT	31
+#define lpfc_sliport_status_err_MASK	0x1
+#define lpfc_sliport_status_err_WORD	word0
+#define lpfc_sliport_status_end_SHIFT	30
+#define lpfc_sliport_status_end_MASK	0x1
+#define lpfc_sliport_status_end_WORD	word0
+#define lpfc_sliport_status_oti_SHIFT	29
+#define lpfc_sliport_status_oti_MASK	0x1
+#define lpfc_sliport_status_oti_WORD	word0
+#define lpfc_sliport_status_rn_SHIFT	24
+#define lpfc_sliport_status_rn_MASK	0x1
+#define lpfc_sliport_status_rn_WORD	word0
+#define lpfc_sliport_status_rdy_SHIFT	23
+#define lpfc_sliport_status_rdy_MASK	0x1
+#define lpfc_sliport_status_rdy_WORD	word0
+#define MAX_IF_TYPE_2_RESETS	1000
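+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * word0 of the port status register, read from LPFC_CTL_PORT_STA_OFFSET,
+ * is overlaid on struct lpfc_register and decoded with the bf_get()
+ * accessor assumed to be defined earlier in this file.
+ */
+static inline int
+lpfc_example_port_ready(uint32_t port_status_word0)
+{
+	struct lpfc_register reg;
+
+	reg.word0 = port_status_word0;
+	if (bf_get(lpfc_sliport_status_err, &reg))
+		return 0;	/* port reported an unrecoverable error */
+	return bf_get(lpfc_sliport_status_rdy, &reg);
+}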
+
+#define LPFC_CTL_PORT_CTL_OFFSET	0x408
+#define lpfc_sliport_ctrl_end_SHIFT	30
+#define lpfc_sliport_ctrl_end_MASK	0x1
+#define lpfc_sliport_ctrl_end_WORD	word0
+#define LPFC_SLIPORT_LITTLE_ENDIAN 0
+#define LPFC_SLIPORT_BIG_ENDIAN	   1
+#define lpfc_sliport_ctrl_ip_SHIFT	27
+#define lpfc_sliport_ctrl_ip_MASK	0x1
+#define lpfc_sliport_ctrl_ip_WORD	word0
+#define LPFC_SLIPORT_INIT_PORT	1
+
+#define LPFC_CTL_PORT_ER1_OFFSET	0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET	0x410
+
+/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
+ * reside in BAR 2.
+ */
+#define LPFC_SLIPORT_IF0_SMPHR	0x00AC
+
+#define LPFC_IMR_MASK_ALL	0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
+
+#define LPFC_HST_ISR0		0x0C18
+#define LPFC_HST_ISR1		0x0C1C
+#define LPFC_HST_ISR2		0x0C20
+#define LPFC_HST_ISR3		0x0C24
+#define LPFC_HST_ISR4		0x0C28
+
+#define LPFC_HST_IMR0		0x0C48
+#define LPFC_HST_IMR1		0x0C4C
+#define LPFC_HST_IMR2		0x0C50
+#define LPFC_HST_IMR3		0x0C54
+#define LPFC_HST_IMR4		0x0C58
+
+#define LPFC_HST_ISCR0		0x0C78
+#define LPFC_HST_ISCR1		0x0C7C
+#define LPFC_HST_ISCR2		0x0C80
+#define LPFC_HST_ISCR3		0x0C84
+#define LPFC_HST_ISCR4		0x0C88
+
+#define LPFC_SLI4_INTR0			BIT0
+#define LPFC_SLI4_INTR1			BIT1
+#define LPFC_SLI4_INTR2			BIT2
+#define LPFC_SLI4_INTR3			BIT3
+#define LPFC_SLI4_INTR4			BIT4
+#define LPFC_SLI4_INTR5			BIT5
+#define LPFC_SLI4_INTR6			BIT6
+#define LPFC_SLI4_INTR7			BIT7
+#define LPFC_SLI4_INTR8			BIT8
+#define LPFC_SLI4_INTR9			BIT9
+#define LPFC_SLI4_INTR10		BIT10
+#define LPFC_SLI4_INTR11		BIT11
+#define LPFC_SLI4_INTR12		BIT12
+#define LPFC_SLI4_INTR13		BIT13
+#define LPFC_SLI4_INTR14		BIT14
+#define LPFC_SLI4_INTR15		BIT15
+#define LPFC_SLI4_INTR16		BIT16
+#define LPFC_SLI4_INTR17		BIT17
+#define LPFC_SLI4_INTR18		BIT18
+#define LPFC_SLI4_INTR19		BIT19
+#define LPFC_SLI4_INTR20		BIT20
+#define LPFC_SLI4_INTR21		BIT21
+#define LPFC_SLI4_INTR22		BIT22
+#define LPFC_SLI4_INTR23		BIT23
+#define LPFC_SLI4_INTR24		BIT24
+#define LPFC_SLI4_INTR25		BIT25
+#define LPFC_SLI4_INTR26		BIT26
+#define LPFC_SLI4_INTR27		BIT27
+#define LPFC_SLI4_INTR28		BIT28
+#define LPFC_SLI4_INTR29		BIT29
+#define LPFC_SLI4_INTR30		BIT30
+#define LPFC_SLI4_INTR31		BIT31
+
+/*
+ * The Doorbell registers defined here exist in different BAR
+ * register sets depending on the UCNA Port's reported if_type
+ * value.  For UCNA ports running SLI4 and if_type 0, they reside in
+ * BAR4.  For UCNA ports running SLI4 and if_type 2, they reside in
+ * BAR0.  The offsets are the same so the driver must account for
+ * any base address difference.
+ */
+#define LPFC_RQ_DOORBELL		0x00A0
+#define lpfc_rq_doorbell_num_posted_SHIFT	16
+#define lpfc_rq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_rq_doorbell_num_posted_WORD	word0
+#define lpfc_rq_doorbell_id_SHIFT		0
+#define lpfc_rq_doorbell_id_MASK		0xFFFF
+#define lpfc_rq_doorbell_id_WORD		word0
+
+#define LPFC_WQ_DOORBELL		0x0040
+#define lpfc_wq_doorbell_num_posted_SHIFT	24
+#define lpfc_wq_doorbell_num_posted_MASK	0x00FF
+#define lpfc_wq_doorbell_num_posted_WORD	word0
+#define lpfc_wq_doorbell_index_SHIFT		16
+#define lpfc_wq_doorbell_index_MASK		0x00FF
+#define lpfc_wq_doorbell_index_WORD		word0
+#define lpfc_wq_doorbell_id_SHIFT		0
+#define lpfc_wq_doorbell_id_MASK		0xFFFF
+#define lpfc_wq_doorbell_id_WORD		word0
+
+#define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_se_SHIFT		31
+#define lpfc_eqcq_doorbell_se_MASK		0x0001
+#define lpfc_eqcq_doorbell_se_WORD		word0
+#define LPFC_EQCQ_SOLICIT_ENABLE_OFF	0
+#define LPFC_EQCQ_SOLICIT_ENABLE_ON	1
+#define lpfc_eqcq_doorbell_arm_SHIFT		29
+#define lpfc_eqcq_doorbell_arm_MASK		0x0001
+#define lpfc_eqcq_doorbell_arm_WORD		word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT	16
+#define lpfc_eqcq_doorbell_num_released_MASK	0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD	word0
+#define lpfc_eqcq_doorbell_qt_SHIFT		10
+#define lpfc_eqcq_doorbell_qt_MASK		0x0001
+#define lpfc_eqcq_doorbell_qt_WORD		word0
+#define LPFC_QUEUE_TYPE_COMPLETION	0
+#define LPFC_QUEUE_TYPE_EVENT		1
+#define lpfc_eqcq_doorbell_eqci_SHIFT		9
+#define lpfc_eqcq_doorbell_eqci_MASK		0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_cqid_lo_MASK		0x03FF
+#define lpfc_eqcq_doorbell_cqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_cqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_cqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_cqid_hi_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_lo_SHIFT	0
+#define lpfc_eqcq_doorbell_eqid_lo_MASK		0x01FF
+#define lpfc_eqcq_doorbell_eqid_lo_WORD		word0
+#define lpfc_eqcq_doorbell_eqid_hi_SHIFT	11
+#define lpfc_eqcq_doorbell_eqid_hi_MASK		0x001F
+#define lpfc_eqcq_doorbell_eqid_hi_WORD		word0
+#define LPFC_CQID_HI_FIELD_SHIFT		10
+#define LPFC_EQID_HI_FIELD_SHIFT		9
+
+#define LPFC_BMBX			0x0160
+#define lpfc_bmbx_addr_SHIFT		2
+#define lpfc_bmbx_addr_MASK		0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD		word0
+#define lpfc_bmbx_hi_SHIFT		1
+#define lpfc_bmbx_hi_MASK		0x0001
+#define lpfc_bmbx_hi_WORD		word0
+#define lpfc_bmbx_rdy_SHIFT		0
+#define lpfc_bmbx_rdy_MASK		0x0001
+#define lpfc_bmbx_rdy_WORD		word0
+
+#define LPFC_MQ_DOORBELL			0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT	16
+#define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD	word0
+#define lpfc_mq_doorbell_id_SHIFT		0
+#define lpfc_mq_doorbell_id_MASK		0xFFFF
+#define lpfc_mq_doorbell_id_WORD		word0
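+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * compose an EQCQ doorbell word that releases 'count' event queue entries
+ * for EQ 'eqid' and optionally re-arms the queue.  Assumes the bf_set()
+ * accessor defined earlier in this file; the caller is expected to write
+ * the result to LPFC_EQCQ_DOORBELL in whichever BAR its if_type uses.
+ */
+static inline uint32_t
+lpfc_example_eq_doorbell(uint16_t eqid, uint16_t count, int arm)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	/* EQ IDs wider than 9 bits are split across the lo/hi fields. */
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, eqid);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+	       (eqid >> LPFC_EQID_HI_FIELD_SHIFT));
+	return doorbell.word0;
+}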
+
+struct lpfc_sli4_cfg_mhdr {
+	uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT		0
+#define lpfc_mbox_hdr_emb_MASK		0x00000001
+#define lpfc_mbox_hdr_emb_WORD		word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT	3
+#define lpfc_mbox_hdr_sge_cnt_MASK	0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD	word1
+	uint32_t payload_length;
+	uint32_t tag_lo;
+	uint32_t tag_hi;
+	uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT	0
+#define lpfc_mbox_hdr_opcode_MASK	0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD	word6
+#define lpfc_mbox_hdr_subsystem_SHIFT	8
+#define lpfc_mbox_hdr_subsystem_MASK	0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD	word6
+#define lpfc_mbox_hdr_port_number_SHIFT	16
+#define lpfc_mbox_hdr_port_number_MASK	0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD	word6
+#define lpfc_mbox_hdr_domain_SHIFT	24
+#define lpfc_mbox_hdr_domain_MASK	0x000000FF
+#define lpfc_mbox_hdr_domain_WORD	word6
+		uint32_t timeout;
+		uint32_t request_length;
+		uint32_t word9;
+#define lpfc_mbox_hdr_version_SHIFT	0
+#define lpfc_mbox_hdr_version_MASK	0x000000FF
+#define lpfc_mbox_hdr_version_WORD	word9
+#define lpfc_mbox_hdr_pf_num_SHIFT	16
+#define lpfc_mbox_hdr_pf_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_pf_num_WORD	word9
+#define lpfc_mbox_hdr_vh_num_SHIFT	24
+#define lpfc_mbox_hdr_vh_num_MASK	0x000000FF
+#define lpfc_mbox_hdr_vh_num_WORD	word9
+#define LPFC_Q_CREATE_VERSION_2	2
+#define LPFC_Q_CREATE_VERSION_1	1
+#define LPFC_Q_CREATE_VERSION_0	0
+#define LPFC_OPCODE_VERSION_0	0
+#define LPFC_OPCODE_VERSION_1	1
+	} request;
+	struct {
+		uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT		0
+#define lpfc_mbox_hdr_opcode_MASK		0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD		word6
+#define lpfc_mbox_hdr_subsystem_SHIFT		8
+#define lpfc_mbox_hdr_subsystem_MASK		0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD		word6
+#define lpfc_mbox_hdr_domain_SHIFT		24
+#define lpfc_mbox_hdr_domain_MASK		0x000000FF
+#define lpfc_mbox_hdr_domain_WORD		word6
+		uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT		0
+#define lpfc_mbox_hdr_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_status_WORD		word7
+#define lpfc_mbox_hdr_add_status_SHIFT		8
+#define lpfc_mbox_hdr_add_status_MASK		0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD		word7
+		uint32_t response_length;
+		uint32_t actual_response_length;
+	} response;
+};
+
+/* Mailbox Header structures.
+ * struct mbox_header is defined for first generation SLI4_CFG mailbox
+ * calls deployed for BE-based ports.
+ *
+ * struct sli4_mbox_header is defined for second generation SLI4
+ * ports that don't deploy the SLI4_CFG mechanism.
+ */
+struct mbox_header {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+};
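+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * after an SLI4_CFG command completes, the shared sub-header response view
+ * carries the status and additional-status bytes.  Assumes the bf_get()
+ * accessor defined earlier in this file; zero means success.
+ */
+static inline uint32_t
+lpfc_example_cfg_shdr_status(struct mbox_header *hdr)
+{
+	union lpfc_sli4_cfg_shdr *shdr = &hdr->cfg_shdr;
+
+	return (bf_get(lpfc_mbox_hdr_status, &shdr->response) |
+		(bf_get(lpfc_mbox_hdr_add_status, &shdr->response) << 8));
+}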
+
+#define LPFC_EXTENT_LOCAL		0
+#define LPFC_TIMEOUT_DEFAULT		0
+#define LPFC_EXTENT_VERSION_DEFAULT	0
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_NA		0x0
+#define LPFC_MBOX_SUBSYSTEM_COMMON	0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE	0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0   0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1	0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_NA				0x00
+#define LPFC_MBOX_OPCODE_CQ_CREATE			0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE			0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE			0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES		0x20
+#define LPFC_MBOX_OPCODE_NOP				0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY			0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY			0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY			0x37
+#define LPFC_MBOX_OPCODE_QUERY_FW_CFG			0x3A
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET			0x3D
+#define LPFC_MBOX_OPCODE_GET_PORT_NAME			0x4D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT			0x5A
+#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO		0x9A
+#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT		0x9B
+#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT		0x9C
+#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT		0x9D
+#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG		0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG		0xA4
+#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG		0xA5
+#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST		0xA6
+#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE		0xA8
+#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG	0xA9
+#define LPFC_MBOX_OPCODE_READ_OBJECT			0xAB
+#define LPFC_MBOX_OPCODE_WRITE_OBJECT			0xAC
+#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST		0xAD
+#define LPFC_MBOX_OPCODE_DELETE_OBJECT			0xAE
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS		0xB5
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE			0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY		0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES		0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES		0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE			0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY		0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE		0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF			0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF		0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE		0x0B
+#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF		0x10
+#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS	0x21
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE		0x22
+#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK	0x23
+
+/* Mailbox command structures */
+struct eq_context {
+	uint32_t word0;
+#define lpfc_eq_context_size_SHIFT	31
+#define lpfc_eq_context_size_MASK	0x00000001
+#define lpfc_eq_context_size_WORD	word0
+#define LPFC_EQE_SIZE_4			0x0
+#define LPFC_EQE_SIZE_16		0x1
+#define lpfc_eq_context_valid_SHIFT	29
+#define lpfc_eq_context_valid_MASK	0x00000001
+#define lpfc_eq_context_valid_WORD	word0
+	uint32_t word1;
+#define lpfc_eq_context_count_SHIFT	26
+#define lpfc_eq_context_count_MASK	0x00000003
+#define lpfc_eq_context_count_WORD	word1
+#define LPFC_EQ_CNT_256		0x0
+#define LPFC_EQ_CNT_512		0x1
+#define LPFC_EQ_CNT_1024	0x2
+#define LPFC_EQ_CNT_2048	0x3
+#define LPFC_EQ_CNT_4096	0x4
+	uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT	13
+#define lpfc_eq_context_delay_multi_MASK	0x000003FF
+#define lpfc_eq_context_delay_multi_WORD	word2
+	uint32_t reserved3;
+};
+
+struct sgl_page_pairs {
+	uint32_t sgl_pg0_addr_lo;
+	uint32_t sgl_pg0_addr_hi;
+	uint32_t sgl_pg1_addr_lo;
+	uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+	struct mbox_header header;
+	uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT	0
+#define lpfc_post_sgl_pages_xri_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD	word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT	16
+#define lpfc_post_sgl_pages_xricnt_MASK	0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD	word0
+	struct sgl_page_pairs  sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word0;
+	struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+	uint32_t pa_lo;
+	uint32_t pa_hi;
+	uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+	struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES	19
+	struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+	void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT	0
+#define lpfc_mbx_eq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD	word0
+			struct eq_context context;
+			struct dma_address page[LPFC_MAX_EQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT	0
+#define lpfc_mbx_eq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_eq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_eq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_nop {
+	struct mbox_header header;
+	uint32_t context[2];
+};
+
+struct cq_context {
+	uint32_t word0;
+#define lpfc_cq_context_event_SHIFT	31
+#define lpfc_cq_context_event_MASK	0x00000001
+#define lpfc_cq_context_event_WORD	word0
+#define lpfc_cq_context_valid_SHIFT	29
+#define lpfc_cq_context_valid_MASK	0x00000001
+#define lpfc_cq_context_valid_WORD	word0
+#define lpfc_cq_context_count_SHIFT	27
+#define lpfc_cq_context_count_MASK	0x00000003
+#define lpfc_cq_context_count_WORD	word0
+#define LPFC_CQ_CNT_256		0x0
+#define LPFC_CQ_CNT_512		0x1
+#define LPFC_CQ_CNT_1024	0x2
+	uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT		22	/* Version 0 Only */
+#define lpfc_cq_eq_id_MASK		0x000000FF
+#define lpfc_cq_eq_id_WORD		word1
+#define lpfc_cq_eq_id_2_SHIFT		0 	/* Version 2 Only */
+#define lpfc_cq_eq_id_2_MASK		0x0000FFFF
+#define lpfc_cq_eq_id_2_WORD		word1
+	uint32_t reserved0;
+	uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_page_size_SHIFT	16	/* Version 2 Only */
+#define lpfc_mbx_cq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_cq_create_page_size_WORD	word0
+#define lpfc_mbx_cq_create_num_pages_SHIFT	0
+#define lpfc_mbx_cq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD	word0
+			struct cq_context context;
+			struct dma_address page[LPFC_MAX_CQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT	0
+#define lpfc_mbx_cq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_cq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_cq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct wq_context {
+	uint32_t reserved0;
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+	struct mbox_header header;
+	union {
+		struct {	/* Version 0 Request */
+			uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT	0
+#define lpfc_mbx_wq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_WORD	word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT		16
+#define lpfc_mbx_wq_create_cq_id_MASK		0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD		word0
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {	/* Version 1 Request */
+			uint32_t word0;	/* Word 0 is the same as in v0 */
+			uint32_t word1;
+#define lpfc_mbx_wq_create_page_size_SHIFT	0
+#define lpfc_mbx_wq_create_page_size_MASK	0x000000FF
+#define lpfc_mbx_wq_create_page_size_WORD	word1
+#define lpfc_mbx_wq_create_wqe_size_SHIFT	8
+#define lpfc_mbx_wq_create_wqe_size_MASK	0x0000000F
+#define lpfc_mbx_wq_create_wqe_size_WORD	word1
+#define LPFC_WQ_WQE_SIZE_64	0x5
+#define LPFC_WQ_WQE_SIZE_128	0x6
+#define lpfc_mbx_wq_create_wqe_count_SHIFT	16
+#define lpfc_mbx_wq_create_wqe_count_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_wqe_count_WORD	word1
+			uint32_t word2;
+			struct dma_address page[LPFC_MAX_WQ_PAGE-1];
+		} request_1;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT	0
+#define lpfc_mbx_wq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
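+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * fill the version 1 request bit-fields above for a WQ of 64-byte WQEs
+ * backed by 'num_pages' pages (page size expressed in 4KB units).  Queue
+ * page addresses and mailbox submission are left to the caller.  Assumes
+ * the bf_set() accessor defined earlier in this file.
+ */
+static inline void
+lpfc_example_wq_create_v1(struct lpfc_mbx_wq_create *wq_create,
+			  uint16_t cq_id, uint16_t num_pages,
+			  uint16_t wqe_count)
+{
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request_1,
+	       num_pages);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request_1, cq_id);
+	bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 1);
+	bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1,
+	       LPFC_WQ_WQE_SIZE_64);
+	bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+	       wqe_count);
+}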
+
+struct lpfc_mbx_wq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_wq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 2048
+struct rq_context {
+	uint32_t word0;
+#define lpfc_rq_context_rqe_count_SHIFT	16	/* Version 0 Only */
+#define lpfc_rq_context_rqe_count_MASK	0x0000000F
+#define lpfc_rq_context_rqe_count_WORD	word0
+#define LPFC_RQ_RING_SIZE_512		9	/* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024		10	/* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048		11	/* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096		12	/* 4096 entries */
+#define lpfc_rq_context_rqe_count_1_SHIFT	16	/* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_MASK	0x0000FFFF
+#define lpfc_rq_context_rqe_count_1_WORD	word0
+#define lpfc_rq_context_rqe_size_SHIFT	8		/* Version 1 Only */
+#define lpfc_rq_context_rqe_size_MASK	0x0000000F
+#define lpfc_rq_context_rqe_size_WORD	word0
+#define LPFC_RQE_SIZE_8		2
+#define LPFC_RQE_SIZE_16	3
+#define LPFC_RQE_SIZE_32	4
+#define LPFC_RQE_SIZE_64	5
+#define LPFC_RQE_SIZE_128	6
+#define lpfc_rq_context_page_size_SHIFT	0		/* Version 1 Only */
+#define lpfc_rq_context_page_size_MASK	0x000000FF
+#define lpfc_rq_context_page_size_WORD	word0
+	uint32_t reserved1;
+	uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT	16
+#define lpfc_rq_context_cq_id_MASK	0x000003FF
+#define lpfc_rq_context_cq_id_WORD	word2
+#define lpfc_rq_context_buf_size_SHIFT	0
+#define lpfc_rq_context_buf_size_MASK	0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD	word2
+	uint32_t buffer_size;				/* Version 1 Only */
+};
+
+struct lpfc_mbx_rq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT	0
+#define lpfc_mbx_rq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD	word0
+			struct rq_context context;
+			struct dma_address page[LPFC_MAX_WQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT	0
+#define lpfc_mbx_rq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_rq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_rq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+struct mq_context {
+	uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT	22 	/* Version 0 Only */
+#define lpfc_mq_context_cq_id_MASK	0x000003FF
+#define lpfc_mq_context_cq_id_WORD	word0
+#define lpfc_mq_context_ring_size_SHIFT	16
+#define lpfc_mq_context_ring_size_MASK	0x0000000F
+#define lpfc_mq_context_ring_size_WORD	word0
+#define LPFC_MQ_RING_SIZE_16		0x5
+#define LPFC_MQ_RING_SIZE_32		0x6
+#define LPFC_MQ_RING_SIZE_64		0x7
+#define LPFC_MQ_RING_SIZE_128		0x8
+	uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT	31
+#define lpfc_mq_context_valid_MASK	0x00000001
+#define lpfc_mq_context_valid_WORD	word1
+	uint32_t reserved2;
+	uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD	word0
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+};
+
+struct lpfc_mbx_mq_create_ext {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT	0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD	word0
+#define lpfc_mbx_mq_create_ext_cq_id_SHIFT	16	/* Version 1 Only */
+#define lpfc_mbx_mq_create_ext_cq_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_ext_cq_id_WORD	word0
+			uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT	LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT	LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT	LPFC_TRAILER_CODE_FC
+#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT	LPFC_TRAILER_CODE_SLI
+#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD	async_evt_bmap
+			struct mq_context context;
+			struct dma_address page[LPFC_MAX_MQ_PAGE];
+		} request;
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT	0
+#define lpfc_mbx_mq_create_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD	word0
+		} response;
+	} u;
+#define LPFC_ASYNC_EVENT_LINK_STATE	0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE	0x4
+#define LPFC_ASYNC_EVENT_GROUP5		0x20
+};
+
+struct lpfc_mbx_mq_destroy {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT	0
+#define lpfc_mbx_mq_destroy_q_id_MASK	0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD	word0
+		} request;
+		struct {
+			uint32_t word0;
+		} response;
+	} u;
+};
+
+/* Start Gen 2 SLI4 Mailbox definitions: */
+
+/* Define allocate-ready Gen 2 SLI4 FCoE Resource Extent Types. */
+#define LPFC_RSC_TYPE_FCOE_VFI	0x20
+#define LPFC_RSC_TYPE_FCOE_VPI	0x21
+#define LPFC_RSC_TYPE_FCOE_RPI	0x22
+#define LPFC_RSC_TYPE_FCOE_XRI	0x23
+
+struct lpfc_mbx_get_rsrc_extent_info {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_type_SHIFT	0
+#define lpfc_mbx_get_rsrc_extent_info_type_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_type_WORD		word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_rsrc_extent_info_cnt_SHIFT		0
+#define lpfc_mbx_get_rsrc_extent_info_cnt_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_cnt_WORD		word4
+#define lpfc_mbx_get_rsrc_extent_info_size_SHIFT	16
+#define lpfc_mbx_get_rsrc_extent_info_size_MASK		0x0000FFFF
+#define lpfc_mbx_get_rsrc_extent_info_size_WORD		word4
+		} rsp;
+	} u;
+};
+
+struct lpfc_id_range {
+	uint32_t word5;
+#define lpfc_mbx_rsrc_id_word4_0_SHIFT	0
+#define lpfc_mbx_rsrc_id_word4_0_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_0_WORD	word5
+#define lpfc_mbx_rsrc_id_word4_1_SHIFT	16
+#define lpfc_mbx_rsrc_id_word4_1_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_id_word4_1_WORD	word5
+};
+
+struct lpfc_mbx_set_link_diag_state {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_state_diag_SHIFT	0
+#define lpfc_mbx_set_diag_state_diag_MASK	0x00000001
+#define lpfc_mbx_set_diag_state_diag_WORD	word0
+#define lpfc_mbx_set_diag_state_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_state_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_state_link_num_WORD	word0
+#define lpfc_mbx_set_diag_state_link_type_SHIFT 22
+#define lpfc_mbx_set_diag_state_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_state_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_set_link_diag_loopback {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_set_diag_lpbk_type_SHIFT	0
+#define lpfc_mbx_set_diag_lpbk_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_type_WORD	word0
+#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE		0x0
+#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL	0x1
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES		0x2
+#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT	16
+#define lpfc_mbx_set_diag_lpbk_link_num_MASK	0x0000003F
+#define lpfc_mbx_set_diag_lpbk_link_num_WORD	word0
+#define lpfc_mbx_set_diag_lpbk_link_type_SHIFT	22
+#define lpfc_mbx_set_diag_lpbk_link_type_MASK	0x00000003
+#define lpfc_mbx_set_diag_lpbk_link_type_WORD	word0
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+struct lpfc_mbx_run_link_diag_test {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word0;
+#define lpfc_mbx_run_diag_test_link_num_SHIFT	16
+#define lpfc_mbx_run_diag_test_link_num_MASK	0x0000003F
+#define lpfc_mbx_run_diag_test_link_num_WORD	word0
+#define lpfc_mbx_run_diag_test_link_type_SHIFT	22
+#define lpfc_mbx_run_diag_test_link_type_MASK	0x00000003
+#define lpfc_mbx_run_diag_test_link_type_WORD	word0
+			uint32_t word1;
+#define lpfc_mbx_run_diag_test_test_id_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_id_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_id_WORD	word1
+#define lpfc_mbx_run_diag_test_loops_SHIFT	16
+#define lpfc_mbx_run_diag_test_loops_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_loops_WORD	word1
+			uint32_t word2;
+#define lpfc_mbx_run_diag_test_test_ver_SHIFT	0
+#define lpfc_mbx_run_diag_test_test_ver_MASK	0x0000FFFF
+#define lpfc_mbx_run_diag_test_test_ver_WORD	word2
+#define lpfc_mbx_run_diag_test_err_act_SHIFT	16
+#define lpfc_mbx_run_diag_test_err_act_MASK	0x000000FF
+#define lpfc_mbx_run_diag_test_err_act_WORD	word2
+		} req;
+		struct {
+			uint32_t word0;
+		} rsp;
+	} u;
+};
+
+/*
+ * struct lpfc_mbx_alloc_rsrc_extents:
+ * A mbox is generically 256 bytes long. An SLI4_CONFIG mailbox requires
+ * 6 words of header + 4 words of shared subcommand header +
+ * 1 word of Extent-Opcode-specific header = 11 words or 44 bytes total.
+ *
+ * An embedded version of SLI4_CONFIG therefore has 256 - 44 = 212 bytes
+ * for extents payload.
+ *
+ * 212/2 (bytes per extent) = 106 extents.
+ * 106/2 (extents per word) = 53 words.
+ * lpfc_id_range id is statically sized to 53.
+ *
+ * This mailbox definition is used for ALLOC or GET_ALLOCATED
+ * extent ranges.  For ALLOC, the type and cnt are required.
+ * For GET_ALLOCATED, only the type is required.
+ */
+struct lpfc_mbx_alloc_rsrc_extents {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_alloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_alloc_rsrc_extents_type_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_type_WORD	word4
+#define lpfc_mbx_alloc_rsrc_extents_cnt_SHIFT	16
+#define lpfc_mbx_alloc_rsrc_extents_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_alloc_rsrc_extents_cnt_WORD	word4
+		} req;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_rsrc_cnt_SHIFT	0
+#define lpfc_mbx_rsrc_cnt_MASK	0x0000FFFF
+#define lpfc_mbx_rsrc_cnt_WORD	word4
+			struct lpfc_id_range id[53];
+		} rsp;
+	} u;
+};
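+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * each lpfc_id_range word in the response above packs two 16-bit extent
+ * base IDs, so 'rsrc_cnt' extents occupy (rsrc_cnt + 1) / 2 id[] words.
+ * Assumes the bf_get() accessor defined earlier in this file; 'out' must
+ * hold at least rsrc_cnt entries and rsrc_cnt must not exceed 106.
+ */
+static inline void
+lpfc_example_unpack_extent_ids(struct lpfc_mbx_alloc_rsrc_extents *ext,
+			       uint16_t *out, uint16_t rsrc_cnt)
+{
+	uint16_t i;
+
+	for (i = 0; i < rsrc_cnt; i++) {
+		struct lpfc_id_range *range = &ext->u.rsp.id[i / 2];
+
+		out[i] = (i & 1) ?
+			bf_get(lpfc_mbx_rsrc_id_word4_1, range) :
+			bf_get(lpfc_mbx_rsrc_id_word4_0, range);
+	}
+}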
+
+/*
+ * This is the non-embedded version of ALLOC or GET RSRC_EXTENTS. Word4 in this
+ * structure shares the same SHIFT/MASK/WORD defines as word4 of the
+ * lpfc_mbx_alloc_rsrc_extents structure defined above (used for both the
+ * ALLOC and GET_ALLOCATED forms).  This non-embedded structure provides
+ * for the maximum number of extents supported by the port.
+ */
+struct lpfc_mbx_nembed_rsrc_extent {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word4;
+	struct lpfc_id_range id;
+};
+
+struct lpfc_mbx_dealloc_rsrc_extents {
+	struct mbox_header header;
+	struct {
+		uint32_t word4;
+#define lpfc_mbx_dealloc_rsrc_extents_type_SHIFT	0
+#define lpfc_mbx_dealloc_rsrc_extents_type_MASK		0x0000FFFF
+#define lpfc_mbx_dealloc_rsrc_extents_type_WORD		word4
+	} req;
+
+};
+
+/* Start SLI4 FCoE specific mbox structures. */
+
+struct lpfc_mbx_post_hdr_tmpl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT  0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK   0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD   word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT   16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK    0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD    word10
+	uint32_t rpi_paddr_lo;
+	uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge {	/* SLI-4 */
+	uint32_t addr_hi;
+	uint32_t addr_lo;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT	0
+#define lpfc_sli4_sge_offset_MASK	0x07FFFFFF
+#define lpfc_sli4_sge_offset_WORD	word2
+#define lpfc_sli4_sge_type_SHIFT	27
+#define lpfc_sli4_sge_type_MASK		0x0000000F
+#define lpfc_sli4_sge_type_WORD		word2
+#define LPFC_SGE_TYPE_DATA		0x0
+#define LPFC_SGE_TYPE_DIF		0x4
+#define LPFC_SGE_TYPE_LSP		0x5
+#define LPFC_SGE_TYPE_PEDIF		0x6
+#define LPFC_SGE_TYPE_PESEED		0x7
+#define LPFC_SGE_TYPE_DISEED		0x8
+#define LPFC_SGE_TYPE_ENC		0x9
+#define LPFC_SGE_TYPE_ATM		0xA
+#define LPFC_SGE_TYPE_SKIP		0xC
+#define lpfc_sli4_sge_last_SHIFT	31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_last_MASK		0x00000001
+#define lpfc_sli4_sge_last_WORD		word2
+	uint32_t sge_len;
+};
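+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * populate one data SGE from the layout above.  Word2 is built in host
+ * order with the bf_set() accessor assumed defined earlier in this file
+ * and then byte-swapped with cpu_to_le32(), assumed available from the
+ * including translation unit; the SGE offset is left at zero.
+ */
+static inline void
+lpfc_example_fill_data_sge(struct sli4_sge *sge, uint64_t dma_addr,
+			   uint32_t len, int is_last)
+{
+	sge->addr_hi = cpu_to_le32((uint32_t)(dma_addr >> 32));
+	sge->addr_lo = cpu_to_le32((uint32_t)(dma_addr & 0xFFFFFFFF));
+	sge->word2 = 0;
+	bf_set(lpfc_sli4_sge_type, sge, LPFC_SGE_TYPE_DATA);
+	if (is_last)
+		bf_set(lpfc_sli4_sge_last, sge, 1);
+	sge->word2 = cpu_to_le32(sge->word2);
+	sge->sge_len = cpu_to_le32(len);
+}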
+
+struct sli4_sge_diseed {	/* SLI-4 */
+	uint32_t ref_tag;
+	uint32_t ref_tag_tran;
+
+	uint32_t word2;
+#define lpfc_sli4_sge_dif_apptran_SHIFT	0
+#define lpfc_sli4_sge_dif_apptran_MASK	0x0000FFFF
+#define lpfc_sli4_sge_dif_apptran_WORD	word2
+#define lpfc_sli4_sge_dif_af_SHIFT	24
+#define lpfc_sli4_sge_dif_af_MASK	0x00000001
+#define lpfc_sli4_sge_dif_af_WORD	word2
+#define lpfc_sli4_sge_dif_na_SHIFT	25
+#define lpfc_sli4_sge_dif_na_MASK	0x00000001
+#define lpfc_sli4_sge_dif_na_WORD	word2
+#define lpfc_sli4_sge_dif_hi_SHIFT	26
+#define lpfc_sli4_sge_dif_hi_MASK	0x00000001
+#define lpfc_sli4_sge_dif_hi_WORD	word2
+#define lpfc_sli4_sge_dif_type_SHIFT	27
+#define lpfc_sli4_sge_dif_type_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_type_WORD	word2
+#define lpfc_sli4_sge_dif_last_SHIFT	31 /* Last SEG in the SGL sets it */
+#define lpfc_sli4_sge_dif_last_MASK	0x00000001
+#define lpfc_sli4_sge_dif_last_WORD	word2
+	uint32_t word3;
+#define lpfc_sli4_sge_dif_apptag_SHIFT	0
+#define lpfc_sli4_sge_dif_apptag_MASK	0x0000FFFF
+#define lpfc_sli4_sge_dif_apptag_WORD	word3
+#define lpfc_sli4_sge_dif_bs_SHIFT	16
+#define lpfc_sli4_sge_dif_bs_MASK	0x00000007
+#define lpfc_sli4_sge_dif_bs_WORD	word3
+#define lpfc_sli4_sge_dif_ai_SHIFT	19
+#define lpfc_sli4_sge_dif_ai_MASK	0x00000001
+#define lpfc_sli4_sge_dif_ai_WORD	word3
+#define lpfc_sli4_sge_dif_me_SHIFT	20
+#define lpfc_sli4_sge_dif_me_MASK	0x00000001
+#define lpfc_sli4_sge_dif_me_WORD	word3
+#define lpfc_sli4_sge_dif_re_SHIFT	21
+#define lpfc_sli4_sge_dif_re_MASK	0x00000001
+#define lpfc_sli4_sge_dif_re_WORD	word3
+#define lpfc_sli4_sge_dif_ce_SHIFT	22
+#define lpfc_sli4_sge_dif_ce_MASK	0x00000001
+#define lpfc_sli4_sge_dif_ce_WORD	word3
+#define lpfc_sli4_sge_dif_nr_SHIFT	23
+#define lpfc_sli4_sge_dif_nr_MASK	0x00000001
+#define lpfc_sli4_sge_dif_nr_WORD	word3
+#define lpfc_sli4_sge_dif_oprx_SHIFT	24
+#define lpfc_sli4_sge_dif_oprx_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_oprx_WORD	word3
+#define lpfc_sli4_sge_dif_optx_SHIFT	28
+#define lpfc_sli4_sge_dif_optx_MASK	0x0000000F
+#define lpfc_sli4_sge_dif_optx_WORD	word3
+/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */
+};
+
+struct fcf_record {
+	uint32_t max_rcv_size;
+	uint32_t fka_adv_period;
+	uint32_t fip_priority;
+	uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT		0
+#define lpfc_fcf_record_mac_0_MASK		0x000000FF
+#define lpfc_fcf_record_mac_0_WORD		word3
+#define lpfc_fcf_record_mac_1_SHIFT		8
+#define lpfc_fcf_record_mac_1_MASK		0x000000FF
+#define lpfc_fcf_record_mac_1_WORD		word3
+#define lpfc_fcf_record_mac_2_SHIFT		16
+#define lpfc_fcf_record_mac_2_MASK		0x000000FF
+#define lpfc_fcf_record_mac_2_WORD		word3
+#define lpfc_fcf_record_mac_3_SHIFT		24
+#define lpfc_fcf_record_mac_3_MASK		0x000000FF
+#define lpfc_fcf_record_mac_3_WORD		word3
+	uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT		0
+#define lpfc_fcf_record_mac_4_MASK		0x000000FF
+#define lpfc_fcf_record_mac_4_WORD		word4
+#define lpfc_fcf_record_mac_5_SHIFT		8
+#define lpfc_fcf_record_mac_5_MASK		0x000000FF
+#define lpfc_fcf_record_mac_5_WORD		word4
+#define lpfc_fcf_record_fcf_avail_SHIFT		16
+#define lpfc_fcf_record_fcf_avail_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD		word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT	24
+#define lpfc_fcf_record_mac_addr_prov_MASK	0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD	word4
+#define LPFC_FCF_FPMA           1 	/* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA           2       /* Server Provided MAC Address */
+	uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT	0
+#define lpfc_fcf_record_fab_name_0_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD		word5
+#define lpfc_fcf_record_fab_name_1_SHIFT	8
+#define lpfc_fcf_record_fab_name_1_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD		word5
+#define lpfc_fcf_record_fab_name_2_SHIFT	16
+#define lpfc_fcf_record_fab_name_2_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD		word5
+#define lpfc_fcf_record_fab_name_3_SHIFT	24
+#define lpfc_fcf_record_fab_name_3_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD		word5
+	uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT	0
+#define lpfc_fcf_record_fab_name_4_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD		word6
+#define lpfc_fcf_record_fab_name_5_SHIFT	8
+#define lpfc_fcf_record_fab_name_5_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD		word6
+#define lpfc_fcf_record_fab_name_6_SHIFT	16
+#define lpfc_fcf_record_fab_name_6_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD		word6
+#define lpfc_fcf_record_fab_name_7_SHIFT	24
+#define lpfc_fcf_record_fab_name_7_MASK		0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD		word6
+	uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT		0
+#define lpfc_fcf_record_fc_map_0_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD		word7
+#define lpfc_fcf_record_fc_map_1_SHIFT		8
+#define lpfc_fcf_record_fc_map_1_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD		word7
+#define lpfc_fcf_record_fc_map_2_SHIFT		16
+#define lpfc_fcf_record_fc_map_2_MASK		0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD		word7
+#define lpfc_fcf_record_fcf_valid_SHIFT		24
+#define lpfc_fcf_record_fcf_valid_MASK		0x000000FF
+#define lpfc_fcf_record_fcf_valid_WORD		word7
+	uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT		0
+#define lpfc_fcf_record_fcf_index_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD		word8
+#define lpfc_fcf_record_fcf_state_SHIFT		16
+#define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD		word8
+	uint8_t vlan_bitmap[512];
+	uint32_t word137;
+#define lpfc_fcf_record_switch_name_0_SHIFT	0
+#define lpfc_fcf_record_switch_name_0_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_0_WORD	word137
+#define lpfc_fcf_record_switch_name_1_SHIFT	8
+#define lpfc_fcf_record_switch_name_1_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_1_WORD	word137
+#define lpfc_fcf_record_switch_name_2_SHIFT	16
+#define lpfc_fcf_record_switch_name_2_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_2_WORD	word137
+#define lpfc_fcf_record_switch_name_3_SHIFT	24
+#define lpfc_fcf_record_switch_name_3_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_3_WORD	word137
+	uint32_t word138;
+#define lpfc_fcf_record_switch_name_4_SHIFT	0
+#define lpfc_fcf_record_switch_name_4_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_4_WORD	word138
+#define lpfc_fcf_record_switch_name_5_SHIFT	8
+#define lpfc_fcf_record_switch_name_5_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_5_WORD	word138
+#define lpfc_fcf_record_switch_name_6_SHIFT	16
+#define lpfc_fcf_record_switch_name_6_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_6_WORD	word138
+#define lpfc_fcf_record_switch_name_7_SHIFT	24
+#define lpfc_fcf_record_switch_name_7_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_7_WORD	word138
+};
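+
+/*
+ * Minimal usage sketch (hypothetical helper, not part of the driver API):
+ * the FCF MAC address is scattered across word3 and word4 above, one byte
+ * per bit-field.  Assumes the bf_get() accessor defined earlier in this
+ * file; 'mac' must hold 6 bytes.
+ */
+static inline void
+lpfc_example_fcf_record_mac(struct fcf_record *rec, uint8_t *mac)
+{
+	mac[0] = bf_get(lpfc_fcf_record_mac_0, rec);
+	mac[1] = bf_get(lpfc_fcf_record_mac_1, rec);
+	mac[2] = bf_get(lpfc_fcf_record_mac_2, rec);
+	mac[3] = bf_get(lpfc_fcf_record_mac_3, rec);
+	mac[4] = bf_get(lpfc_fcf_record_mac_4, rec);
+	mac[5] = bf_get(lpfc_fcf_record_mac_5, rec);
+}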
+
+struct lpfc_mbx_read_fcf_tbl {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK		0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD		word10
+		} request;
+		struct {
+			uint32_t eventag;
+		} response;
+	} u;
+	uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT	0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK	0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD	word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+	union lpfc_sli4_cfg_shdr cfg_shdr;
+	uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT        0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK         0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD         word10
+	struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT	0
+#define lpfc_mbx_del_fcf_tbl_count_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD		word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT	16
+#define lpfc_mbx_del_fcf_tbl_index_MASK		0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD		word10
+};
+
+struct lpfc_mbx_redisc_fcf_tbl {
+	struct mbox_header header;
+	uint32_t word10;
+#define lpfc_mbx_redisc_fcf_count_SHIFT		0
+#define lpfc_mbx_redisc_fcf_count_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_count_WORD		word10
+	uint32_t resvd;
+	uint32_t word12;
+#define lpfc_mbx_redisc_fcf_index_SHIFT		0
+#define lpfc_mbx_redisc_fcf_index_MASK		0x0000FFFF
+#define lpfc_mbx_redisc_fcf_index_WORD		word12
+};
+
+struct lpfc_mbx_query_fw_cfg {
+	struct mbox_header header;
+	uint32_t config_number;
+	uint32_t asic_rev;
+	uint32_t phys_port;
+	uint32_t function_mode;
+/* firmware Function Mode */
+#define lpfc_function_mode_toe_SHIFT		0
+#define lpfc_function_mode_toe_MASK		0x00000001
+#define lpfc_function_mode_toe_WORD		function_mode
+#define lpfc_function_mode_nic_SHIFT		1
+#define lpfc_function_mode_nic_MASK		0x00000001
+#define lpfc_function_mode_nic_WORD		function_mode
+#define lpfc_function_mode_rdma_SHIFT		2
+#define lpfc_function_mode_rdma_MASK		0x00000001
+#define lpfc_function_mode_rdma_WORD		function_mode
+#define lpfc_function_mode_vm_SHIFT		3
+#define lpfc_function_mode_vm_MASK		0x00000001
+#define lpfc_function_mode_vm_WORD		function_mode
+#define lpfc_function_mode_iscsi_i_SHIFT	4
+#define lpfc_function_mode_iscsi_i_MASK		0x00000001
+#define lpfc_function_mode_iscsi_i_WORD		function_mode
+#define lpfc_function_mode_iscsi_t_SHIFT	5
+#define lpfc_function_mode_iscsi_t_MASK		0x00000001
+#define lpfc_function_mode_iscsi_t_WORD		function_mode
+#define lpfc_function_mode_fcoe_i_SHIFT		6
+#define lpfc_function_mode_fcoe_i_MASK		0x00000001
+#define lpfc_function_mode_fcoe_i_WORD		function_mode
+#define lpfc_function_mode_fcoe_t_SHIFT		7
+#define lpfc_function_mode_fcoe_t_MASK		0x00000001
+#define lpfc_function_mode_fcoe_t_WORD		function_mode
+#define lpfc_function_mode_dal_SHIFT		8
+#define lpfc_function_mode_dal_MASK		0x00000001
+#define lpfc_function_mode_dal_WORD		function_mode
+#define lpfc_function_mode_lro_SHIFT		9
+#define lpfc_function_mode_lro_MASK		0x00000001
+#define lpfc_function_mode_lro_WORD		function_mode
+#define lpfc_function_mode_flex10_SHIFT		10
+#define lpfc_function_mode_flex10_MASK		0x00000001
+#define lpfc_function_mode_flex10_WORD		function_mode
+#define lpfc_function_mode_ncsi_SHIFT		11
+#define lpfc_function_mode_ncsi_MASK		0x00000001
+#define lpfc_function_mode_ncsi_WORD		function_mode
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS					0x0
+#define STATUS_FAILED 					0x1
+#define STATUS_ILLEGAL_REQUEST				0x2
+#define STATUS_ILLEGAL_FIELD				0x3
+#define STATUS_INSUFFICIENT_BUFFER 			0x4
+#define STATUS_UNAUTHORIZED_REQUEST			0x5
+#define STATUS_FLASHROM_SAVE_FAILED			0x17
+#define STATUS_FLASHROM_RESTORE_FAILED			0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED			0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED 		0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM		0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM		0x1d
+#define STATUS_ASSERT_FAILED				0x1e
+#define STATUS_INVALID_SESSION				0x1f
+#define STATUS_INVALID_CONNECTION			0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT		0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH			0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID			0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND			0x26
+#define STATUS_FLASHROM_READ_FAILED			0x27
+#define STATUS_POLL_IOCTL_TIMEOUT			0x28
+#define STATUS_ERROR_ACITMAIN				0x2a
+#define STATUS_REBOOT_REQUIRED				0x2c
+#define STATUS_FCF_IN_USE				0x3a
+#define STATUS_FCF_TABLE_EMPTY				0x43
+
+struct lpfc_mbx_sli4_config {
+	struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+	uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT		31
+#define lpfc_init_vfi_vr_MASK		0x00000001
+#define lpfc_init_vfi_vr_WORD		word1
+#define lpfc_init_vfi_vt_SHIFT		30
+#define lpfc_init_vfi_vt_MASK		0x00000001
+#define lpfc_init_vfi_vt_WORD		word1
+#define lpfc_init_vfi_vf_SHIFT		29
+#define lpfc_init_vfi_vf_MASK		0x00000001
+#define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vp_SHIFT		28
+#define lpfc_init_vfi_vp_MASK		0x00000001
+#define lpfc_init_vfi_vp_WORD		word1
+#define lpfc_init_vfi_vfi_SHIFT		0
+#define lpfc_init_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_init_vfi_vpi_SHIFT		16
+#define lpfc_init_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vpi_WORD		word2
+#define lpfc_init_vfi_fcfi_SHIFT	0
+#define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD		word2
+	uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT		13
+#define lpfc_init_vfi_pri_MASK		0x00000007
+#define lpfc_init_vfi_pri_WORD		word3
+#define lpfc_init_vfi_vf_id_SHIFT	1
+#define lpfc_init_vfi_vf_id_MASK	0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD	word3
+	uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT	24
+#define lpfc_init_vfi_hop_count_MASK	0x000000FF
+#define lpfc_init_vfi_hop_count_WORD	word4
+};
+#define MBX_VFI_IN_USE			0x9F02
+
+
+struct lpfc_mbx_reg_vfi {
+	uint32_t word1;
+#define lpfc_reg_vfi_vp_SHIFT		28
+#define lpfc_reg_vfi_vp_MASK		0x00000001
+#define lpfc_reg_vfi_vp_WORD		word1
+#define lpfc_reg_vfi_vfi_SHIFT		0
+#define lpfc_reg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT		16
+#define lpfc_reg_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD		word2
+#define lpfc_reg_vfi_fcfi_SHIFT		0
+#define lpfc_reg_vfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD		word2
+	uint32_t wwn[2];
+	struct ulp_bde64 bde;
+	uint32_t e_d_tov;
+	uint32_t r_a_tov;
+	uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT		0
+#define lpfc_reg_vfi_nport_id_MASK		0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD		word10
+};
+
+struct lpfc_mbx_init_vpi {
+	uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT		16
+#define lpfc_init_vpi_vfi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD		word1
+#define lpfc_init_vpi_vpi_SHIFT		0
+#define lpfc_init_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD		word1
+};
+
+struct lpfc_mbx_read_vpi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT	0
+#define lpfc_mbx_read_vpi_vnportid_MASK		0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD		word2
+	uint32_t word3_rsvd;
+	uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT	0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD		word4
+#define lpfc_mbx_read_vpi_pb_SHIFT		15
+#define lpfc_mbx_read_vpi_pb_MASK		0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD		word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT	16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK	0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD	word4
+#define lpfc_mbx_read_vpi_ns_SHIFT		30
+#define lpfc_mbx_read_vpi_ns_MASK		0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD		word4
+#define lpfc_mbx_read_vpi_hl_SHIFT		31
+#define lpfc_mbx_read_vpi_hl_MASK		0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD		word4
+	uint32_t word5_rsvd;
+	uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT		0
+#define lpfc_mbx_read_vpi_vpi_MASK		0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD		word6
+	uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_0_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD		word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_1_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD		word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT		16
+#define lpfc_mbx_read_vpi_mac_2_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD		word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT		24
+#define lpfc_mbx_read_vpi_mac_3_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT		0
+#define lpfc_mbx_read_vpi_mac_4_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD		word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT		8
+#define lpfc_mbx_read_vpi_mac_5_MASK		0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD		word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT	16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK		0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD		word8
+#define lpfc_mbx_read_vpi_vv_SHIFT		28
+#define lpfc_mbx_read_vpi_vv_MASK		0x00000001
+#define lpfc_mbx_read_vpi_vv_WORD		word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+	uint32_t word1_rsvd;
+	uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT	0
+#define lpfc_unreg_vfi_vfi_MASK		0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD		word2
+};
+
+struct lpfc_mbx_resume_rpi {
+	uint32_t word1;
+#define lpfc_resume_rpi_index_SHIFT	0
+#define lpfc_resume_rpi_index_MASK	0x0000FFFF
+#define lpfc_resume_rpi_index_WORD	word1
+#define lpfc_resume_rpi_ii_SHIFT	30
+#define lpfc_resume_rpi_ii_MASK		0x00000003
+#define lpfc_resume_rpi_ii_WORD		word1
+#define RESUME_INDEX_RPI		0
+#define RESUME_INDEX_VPI		1
+#define RESUME_INDEX_VFI		2
+#define RESUME_INDEX_FCFI		3
+	uint32_t event_tag;
+};
+
+#define REG_FCF_INVALID_QID	0xFFFF
+struct lpfc_mbx_reg_fcfi {
+	uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT	0
+#define lpfc_reg_fcfi_info_index_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD	word1
+#define lpfc_reg_fcfi_fcfi_SHIFT	16
+#define lpfc_reg_fcfi_fcfi_MASK		0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD		word1
+	uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT	0
+#define lpfc_reg_fcfi_rq_id1_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD	word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT	16
+#define lpfc_reg_fcfi_rq_id0_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD	word2
+	uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT	0
+#define lpfc_reg_fcfi_rq_id3_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD	word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT	16
+#define lpfc_reg_fcfi_rq_id2_MASK	0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD	word3
+	uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT	24
+#define lpfc_reg_fcfi_type_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD	word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT	16
+#define lpfc_reg_fcfi_type_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD	word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD	word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask0_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD	word4
+	uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT	24
+#define lpfc_reg_fcfi_type_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD	word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT	16
+#define lpfc_reg_fcfi_type_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD	word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD	word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask1_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD	word5
+	uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT	24
+#define lpfc_reg_fcfi_type_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD	word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT	16
+#define lpfc_reg_fcfi_type_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD	word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD	word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask2_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD	word6
+	uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT	24
+#define lpfc_reg_fcfi_type_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD	word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT	16
+#define lpfc_reg_fcfi_type_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD	word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT	8
+#define lpfc_reg_fcfi_rctl_match3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD	word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT	0
+#define lpfc_reg_fcfi_rctl_mask3_MASK	0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD	word7
+	uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT		13
+#define lpfc_reg_fcfi_mam_MASK		0x00000003
+#define lpfc_reg_fcfi_mam_WORD		word8
+#define LPFC_MAM_BOTH		0	/* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA		1	/* Server Provided MAC Address */
+#define LPFC_MAM_FPMA		2	/* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT		12
+#define lpfc_reg_fcfi_vv_MASK		0x00000001
+#define lpfc_reg_fcfi_vv_WORD		word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT	0
+#define lpfc_reg_fcfi_vlan_tag_MASK	0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD	word8
+};
+
+struct lpfc_mbx_unreg_fcfi {
+	uint32_t word1_rsv;
+	uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT		0
+#define lpfc_unreg_fcfi_MASK		0x0000FFFF
+#define lpfc_unreg_fcfi_WORD		word2
+};
+
+struct lpfc_mbx_read_rev {
+	uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT  		16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK   		0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD   		word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT		20
+#define lpfc_mbx_rd_rev_fcoe_MASK		0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD		word1
+#define lpfc_mbx_rd_rev_cee_ver_SHIFT		21
+#define lpfc_mbx_rd_rev_cee_ver_MASK		0x00000003
+#define lpfc_mbx_rd_rev_cee_ver_WORD		word1
+#define LPFC_PREDCBX_CEE_MODE	0
+#define LPFC_DCBX_CEE_MODE	1
+#define lpfc_mbx_rd_rev_vpd_SHIFT		29
+#define lpfc_mbx_rd_rev_vpd_MASK		0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD		word1
+	uint32_t first_hw_rev;
+	uint32_t second_hw_rev;
+	uint32_t word4_rsvd;
+	uint32_t third_hw_rev;
+	uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT		0
+#define lpfc_mbx_rd_rev_fcph_low_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD		word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT		8
+#define lpfc_mbx_rd_rev_fcph_high_MASK		0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD		word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT	16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD	word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT	24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK	0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD	word6
+	uint32_t word7_rsvd;
+	uint32_t fw_id_rev;
+	uint8_t  fw_name[16];
+	uint32_t ulp_fw_id_rev;
+	uint8_t  ulp_fw_name[16];
+	uint32_t word18_47_rsvd[30];
+	uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT		0
+#define lpfc_mbx_rd_rev_avail_len_MASK		0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD		word48
+	uint32_t vpd_paddr_low;
+	uint32_t vpd_paddr_high;
+	uint32_t avail_vpd_len;
+	uint32_t rsvd_52_63[12];
+};
+
+struct lpfc_mbx_read_config {
+	uint32_t word1;
+#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT	31
+#define lpfc_mbx_rd_conf_extnts_inuse_MASK	0x00000001
+#define lpfc_mbx_rd_conf_extnts_inuse_WORD	word1
+	uint32_t word2;
+#define lpfc_mbx_rd_conf_lnk_numb_SHIFT		0
+#define lpfc_mbx_rd_conf_lnk_numb_MASK		0x0000003F
+#define lpfc_mbx_rd_conf_lnk_numb_WORD		word2
+#define lpfc_mbx_rd_conf_lnk_type_SHIFT		6
+#define lpfc_mbx_rd_conf_lnk_type_MASK		0x00000003
+#define lpfc_mbx_rd_conf_lnk_type_WORD		word2
+#define LPFC_LNK_TYPE_GE	0
+#define LPFC_LNK_TYPE_FC	1
+#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT		8
+#define lpfc_mbx_rd_conf_lnk_ldv_MASK		0x00000001
+#define lpfc_mbx_rd_conf_lnk_ldv_WORD		word2
+#define lpfc_mbx_rd_conf_topology_SHIFT		24
+#define lpfc_mbx_rd_conf_topology_MASK		0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD		word2
+	uint32_t rsvd_3;
+	uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD		word4
+	uint32_t rsvd_5;
+	uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
+	uint32_t rsvd_7;
+	uint32_t rsvd_8;
+	uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT		0
+#define lpfc_mbx_rd_conf_lmt_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD		word9
+	uint32_t rsvd_10;
+	uint32_t rsvd_11;
+	uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT		0
+#define lpfc_mbx_rd_conf_xri_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD		word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT	16
+#define lpfc_mbx_rd_conf_xri_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD		word12
+	uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_rpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD		word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_rpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD		word13
+	uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT		0
+#define lpfc_mbx_rd_conf_vpi_base_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD		word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_vpi_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD		word14
+	uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT         0
+#define lpfc_mbx_rd_conf_vfi_base_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD          word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT        16
+#define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD         word15
+	uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT	16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD	word16
+	uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_rq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD		word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_eq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD		word17
+	uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT		0
+#define lpfc_mbx_rd_conf_wq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD		word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT		16
+#define lpfc_mbx_rd_conf_cq_count_MASK		0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD		word18
+};
+
+struct lpfc_mbx_request_features {
+	uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT		0
+#define lpfc_mbx_rq_ftr_qry_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD		word1
+	uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD		word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD		word2
+	uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT		0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT		1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT		2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT		3
+#define lpfc_mbx_rq_ftr_rsp_vf_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT		4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT		5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT		6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT		7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD		word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT		11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK		0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD		word3
+};
+
+struct lpfc_mbx_supp_pages {
+	uint32_t word1;
+#define qs_SHIFT 				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK 				0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define list_offset_SHIFT 			0
+#define list_offset_MASK			0x000000ff
+#define list_offset_WORD			word2
+#define next_offset_SHIFT			8
+#define next_offset_MASK			0x000000ff
+#define next_offset_WORD			word2
+#define elem_cnt_SHIFT				16
+#define elem_cnt_MASK				0x000000ff
+#define elem_cnt_WORD				word2
+	uint32_t word3;
+#define pn_0_SHIFT				24
+#define pn_0_MASK  				0x000000ff
+#define pn_0_WORD				word3
+#define pn_1_SHIFT				16
+#define pn_1_MASK				0x000000ff
+#define pn_1_WORD				word3
+#define pn_2_SHIFT				8
+#define pn_2_MASK				0x000000ff
+#define pn_2_WORD				word3
+#define pn_3_SHIFT				0
+#define pn_3_MASK				0x000000ff
+#define pn_3_WORD				word3
+	uint32_t word4;
+#define pn_4_SHIFT				24
+#define pn_4_MASK				0x000000ff
+#define pn_4_WORD				word4
+#define pn_5_SHIFT				16
+#define pn_5_MASK				0x000000ff
+#define pn_5_WORD				word4
+#define pn_6_SHIFT				8
+#define pn_6_MASK				0x000000ff
+#define pn_6_WORD				word4
+#define pn_7_SHIFT				0
+#define pn_7_MASK				0x000000ff
+#define pn_7_WORD				word4
+	uint32_t rsvd[27];
+#define LPFC_SUPP_PAGES			0
+#define LPFC_BLOCK_GUARD_PROFILES	1
+#define LPFC_SLI4_PARAMETERS		2
+};
+
+struct lpfc_mbx_pc_sli4_params {
+	uint32_t word1;
+#define qs_SHIFT				0
+#define qs_MASK					0x00000001
+#define qs_WORD					word1
+#define wr_SHIFT				1
+#define wr_MASK					0x00000001
+#define wr_WORD					word1
+#define pf_SHIFT				8
+#define pf_MASK					0x000000ff
+#define pf_WORD					word1
+#define cpn_SHIFT				16
+#define cpn_MASK				0x000000ff
+#define cpn_WORD				word1
+	uint32_t word2;
+#define if_type_SHIFT				0
+#define if_type_MASK				0x00000007
+#define if_type_WORD				word2
+#define sli_rev_SHIFT				4
+#define sli_rev_MASK				0x0000000f
+#define sli_rev_WORD				word2
+#define sli_family_SHIFT			8
+#define sli_family_MASK				0x000000ff
+#define sli_family_WORD				word2
+#define featurelevel_1_SHIFT			16
+#define featurelevel_1_MASK			0x000000ff
+#define featurelevel_1_WORD			word2
+#define featurelevel_2_SHIFT			24
+#define featurelevel_2_MASK			0x0000001f
+#define featurelevel_2_WORD			word2
+	uint32_t word3;
+#define fcoe_SHIFT 				0
+#define fcoe_MASK				0x00000001
+#define fcoe_WORD				word3
+#define fc_SHIFT				1
+#define fc_MASK					0x00000001
+#define fc_WORD					word3
+#define nic_SHIFT				2
+#define nic_MASK				0x00000001
+#define nic_WORD				word3
+#define iscsi_SHIFT				3
+#define iscsi_MASK				0x00000001
+#define iscsi_WORD				word3
+#define rdma_SHIFT				4
+#define rdma_MASK				0x00000001
+#define rdma_WORD				word3
+	uint32_t sge_supp_len;
+#define SLI4_PAGE_SIZE 4096
+	uint32_t word5;
+#define if_page_sz_SHIFT			0
+#define if_page_sz_MASK				0x0000ffff
+#define if_page_sz_WORD				word5
+#define loopbk_scope_SHIFT			24
+#define loopbk_scope_MASK			0x0000000f
+#define loopbk_scope_WORD			word5
+#define rq_db_window_SHIFT			28
+#define rq_db_window_MASK			0x0000000f
+#define rq_db_window_WORD			word5
+	uint32_t word6;
+#define eq_pages_SHIFT				0
+#define eq_pages_MASK				0x0000000f
+#define eq_pages_WORD				word6
+#define eqe_size_SHIFT				8
+#define eqe_size_MASK				0x000000ff
+#define eqe_size_WORD				word6
+	uint32_t word7;
+#define cq_pages_SHIFT				0
+#define cq_pages_MASK				0x0000000f
+#define cq_pages_WORD				word7
+#define cqe_size_SHIFT				8
+#define cqe_size_MASK				0x000000ff
+#define cqe_size_WORD				word7
+	uint32_t word8;
+#define mq_pages_SHIFT				0
+#define mq_pages_MASK				0x0000000f
+#define mq_pages_WORD				word8
+#define mqe_size_SHIFT				8
+#define mqe_size_MASK				0x000000ff
+#define mqe_size_WORD				word8
+#define mq_elem_cnt_SHIFT			16
+#define mq_elem_cnt_MASK			0x000000ff
+#define mq_elem_cnt_WORD			word8
+	uint32_t word9;
+#define wq_pages_SHIFT				0
+#define wq_pages_MASK				0x0000ffff
+#define wq_pages_WORD				word9
+#define wqe_size_SHIFT				8
+#define wqe_size_MASK				0x000000ff
+#define wqe_size_WORD				word9
+	uint32_t word10;
+#define rq_pages_SHIFT				0
+#define rq_pages_MASK				0x0000ffff
+#define rq_pages_WORD				word10
+#define rqe_size_SHIFT				8
+#define rqe_size_MASK				0x000000ff
+#define rqe_size_WORD				word10
+	uint32_t word11;
+#define hdr_pages_SHIFT				0
+#define hdr_pages_MASK				0x0000000f
+#define hdr_pages_WORD				word11
+#define hdr_size_SHIFT				8
+#define hdr_size_MASK				0x0000000f
+#define hdr_size_WORD				word11
+#define hdr_pp_align_SHIFT			16
+#define hdr_pp_align_MASK			0x0000ffff
+#define hdr_pp_align_WORD			word11
+	uint32_t word12;
+#define sgl_pages_SHIFT				0
+#define sgl_pages_MASK				0x0000000f
+#define sgl_pages_WORD				word12
+#define sgl_pp_align_SHIFT			16
+#define sgl_pp_align_MASK			0x0000ffff
+#define sgl_pp_align_WORD			word12
+	uint32_t rsvd_13_63[51];
+};
+#define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
+			       &(~((SLI4_PAGE_SIZE)-1)))
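+/*
+ * A worked example of the rounding above: SLI4_PAGE_ALIGN(0x1005) yields
+ * 0x2000, while an already aligned value such as SLI4_PAGE_ALIGN(0x1000)
+ * is returned unchanged.
+ */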
+
+struct lpfc_sli4_parameters {
+	uint32_t word0;
+#define cfg_prot_type_SHIFT			0
+#define cfg_prot_type_MASK			0x000000FF
+#define cfg_prot_type_WORD			word0
+	uint32_t word1;
+#define cfg_ft_SHIFT				0
+#define cfg_ft_MASK				0x00000001
+#define cfg_ft_WORD				word1
+#define cfg_sli_rev_SHIFT			4
+#define cfg_sli_rev_MASK			0x0000000f
+#define cfg_sli_rev_WORD			word1
+#define cfg_sli_family_SHIFT			8
+#define cfg_sli_family_MASK			0x0000000f
+#define cfg_sli_family_WORD			word1
+#define cfg_if_type_SHIFT			12
+#define cfg_if_type_MASK			0x0000000f
+#define cfg_if_type_WORD			word1
+#define cfg_sli_hint_1_SHIFT			16
+#define cfg_sli_hint_1_MASK			0x000000ff
+#define cfg_sli_hint_1_WORD			word1
+#define cfg_sli_hint_2_SHIFT			24
+#define cfg_sli_hint_2_MASK			0x0000001f
+#define cfg_sli_hint_2_WORD			word1
+	uint32_t word2;
+	uint32_t word3;
+	uint32_t word4;
+#define cfg_cqv_SHIFT				14
+#define cfg_cqv_MASK				0x00000003
+#define cfg_cqv_WORD				word4
+	uint32_t word5;
+	uint32_t word6;
+#define cfg_mqv_SHIFT				14
+#define cfg_mqv_MASK				0x00000003
+#define cfg_mqv_WORD				word6
+	uint32_t word7;
+	uint32_t word8;
+#define cfg_wqv_SHIFT				14
+#define cfg_wqv_MASK				0x00000003
+#define cfg_wqv_WORD				word8
+	uint32_t word9;
+	uint32_t word10;
+#define cfg_rqv_SHIFT				14
+#define cfg_rqv_MASK				0x00000003
+#define cfg_rqv_WORD				word10
+	uint32_t word11;
+#define cfg_rq_db_window_SHIFT			28
+#define cfg_rq_db_window_MASK			0x0000000f
+#define cfg_rq_db_window_WORD			word11
+	uint32_t word12;
+#define cfg_fcoe_SHIFT				0
+#define cfg_fcoe_MASK				0x00000001
+#define cfg_fcoe_WORD				word12
+#define cfg_ext_SHIFT				1
+#define cfg_ext_MASK				0x00000001
+#define cfg_ext_WORD				word12
+#define cfg_hdrr_SHIFT				2
+#define cfg_hdrr_MASK				0x00000001
+#define cfg_hdrr_WORD				word12
+#define cfg_phwq_SHIFT				15
+#define cfg_phwq_MASK				0x00000001
+#define cfg_phwq_WORD				word12
+#define cfg_loopbk_scope_SHIFT			28
+#define cfg_loopbk_scope_MASK			0x0000000f
+#define cfg_loopbk_scope_WORD			word12
+	uint32_t sge_supp_len;
+	uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT			0
+#define cfg_sgl_page_cnt_MASK			0x0000000f
+#define cfg_sgl_page_cnt_WORD			word14
+#define cfg_sgl_page_size_SHIFT			8
+#define cfg_sgl_page_size_MASK			0x000000ff
+#define cfg_sgl_page_size_WORD			word14
+#define cfg_sgl_pp_align_SHIFT			16
+#define cfg_sgl_pp_align_MASK			0x000000ff
+#define cfg_sgl_pp_align_WORD			word14
+	uint32_t word15;
+	uint32_t word16;
+	uint32_t word17;
+	uint32_t word18;
+	uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+	struct mbox_header header;
+	struct lpfc_sli4_parameters sli4_parameters;
+};
+
+struct lpfc_rscr_desc_generic {
+#define LPFC_RSRC_DESC_WSIZE			18
+	uint32_t desc[LPFC_RSRC_DESC_WSIZE];
+};
+
+struct lpfc_rsrc_desc_pcie {
+	uint32_t word0;
+#define lpfc_rsrc_desc_pcie_type_SHIFT		0
+#define lpfc_rsrc_desc_pcie_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_PCIE		0x40
+	uint32_t word1;
+#define lpfc_rsrc_desc_pcie_pfnum_SHIFT		0
+#define lpfc_rsrc_desc_pcie_pfnum_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pfnum_WORD		word1
+	uint32_t reserved;
+	uint32_t word3;
+#define lpfc_rsrc_desc_pcie_sriov_sta_SHIFT	0
+#define lpfc_rsrc_desc_pcie_sriov_sta_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_sriov_sta_WORD	word3
+#define lpfc_rsrc_desc_pcie_pf_sta_SHIFT	8
+#define lpfc_rsrc_desc_pcie_pf_sta_MASK		0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_sta_WORD		word3
+#define lpfc_rsrc_desc_pcie_pf_type_SHIFT	16
+#define lpfc_rsrc_desc_pcie_pf_type_MASK	0x000000ff
+#define lpfc_rsrc_desc_pcie_pf_type_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_pcie_nr_virtfn_SHIFT	0
+#define lpfc_rsrc_desc_pcie_nr_virtfn_MASK	0x0000ffff
+#define lpfc_rsrc_desc_pcie_nr_virtfn_WORD	word4
+};
+
+struct lpfc_rsrc_desc_fcfcoe {
+	uint32_t word0;
+#define lpfc_rsrc_desc_fcfcoe_type_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_type_MASK		0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_type_WORD		word0
+#define LPFC_RSRC_DESC_TYPE_FCFCOE		0x43
+	uint32_t word1;
+#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK	0x000000ff
+#define lpfc_rsrc_desc_fcfcoe_vfnum_WORD	word1
+#define lpfc_rsrc_desc_fcfcoe_pfnum_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_pfnum_MASK        0x000007ff
+#define lpfc_rsrc_desc_fcfcoe_pfnum_WORD        word1
+	uint32_t word2;
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rpi_cnt_WORD	word2
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_xri_cnt_WORD	word2
+	uint32_t word3;
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_wq_cnt_WORD	word3
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_rq_cnt_WORD	word3
+	uint32_t word4;
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_cq_cnt_WORD	word4
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vpi_cnt_WORD	word4
+	uint32_t word5;
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_fcfi_cnt_WORD	word5
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_vfi_cnt_WORD	word5
+	uint32_t word6;
+	uint32_t word7;
+	uint32_t word8;
+	uint32_t word9;
+	uint32_t word10;
+	uint32_t word11;
+	uint32_t word12;
+	uint32_t word13;
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_SHIFT	0
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_MASK	0x0000003f
+#define lpfc_rsrc_desc_fcfcoe_lnk_nr_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_SHIFT      6
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_MASK	0x00000003
+#define lpfc_rsrc_desc_fcfcoe_lnk_tp_WORD	word13
+#define lpfc_rsrc_desc_fcfcoe_lmc_SHIFT		8
+#define lpfc_rsrc_desc_fcfcoe_lmc_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lmc_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_lld_SHIFT		9
+#define lpfc_rsrc_desc_fcfcoe_lld_MASK		0x00000001
+#define lpfc_rsrc_desc_fcfcoe_lld_WORD		word13
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT	16
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK	0x0000ffff
+#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD	word13
+};
+
+struct lpfc_func_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM			2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_func_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	struct lpfc_func_cfg func_cfg;
+};
+
+struct lpfc_prof_cfg {
+#define LPFC_RSRC_DESC_MAX_NUM			2
+	uint32_t rsrc_desc_count;
+	struct lpfc_rscr_desc_generic desc[LPFC_RSRC_DESC_MAX_NUM];
+};
+
+struct lpfc_mbx_get_prof_cfg {
+	struct mbox_header header;
+#define LPFC_CFG_TYPE_PERSISTENT_OVERRIDE	0x0
+#define LPFC_CFG_TYPE_FACTURY_DEFAULT		0x1
+#define LPFC_CFG_TYPE_CURRENT_ACTIVE		0x2
+	union {
+		struct {
+			uint32_t word10;
+#define lpfc_mbx_get_prof_cfg_prof_id_SHIFT	0
+#define lpfc_mbx_get_prof_cfg_prof_id_MASK	0x000000ff
+#define lpfc_mbx_get_prof_cfg_prof_id_WORD	word10
+#define lpfc_mbx_get_prof_cfg_prof_tp_SHIFT	8
+#define lpfc_mbx_get_prof_cfg_prof_tp_MASK	0x00000003
+#define lpfc_mbx_get_prof_cfg_prof_tp_WORD	word10
+		} request;
+		struct {
+			struct lpfc_prof_cfg prof_cfg;
+		} response;
+	} u;
+};
+
+struct lpfc_controller_attribute {
+	uint32_t version_string[8];
+	uint32_t manufacturer_name[8];
+	uint32_t supported_modes;
+	uint32_t word17;
+#define lpfc_cntl_attr_eprom_ver_lo_SHIFT	0
+#define lpfc_cntl_attr_eprom_ver_lo_MASK	0x000000ff
+#define lpfc_cntl_attr_eprom_ver_lo_WORD	word17
+#define lpfc_cntl_attr_eprom_ver_hi_SHIFT	8
+#define lpfc_cntl_attr_eprom_ver_hi_MASK	0x000000ff
+#define lpfc_cntl_attr_eprom_ver_hi_WORD	word17
+	uint32_t mbx_da_struct_ver;
+	uint32_t ep_fw_da_struct_ver;
+	uint32_t ncsi_ver_str[3];
+	uint32_t dflt_ext_timeout;
+	uint32_t model_number[8];
+	uint32_t description[16];
+	uint32_t serial_number[8];
+	uint32_t ip_ver_str[8];
+	uint32_t fw_ver_str[8];
+	uint32_t bios_ver_str[8];
+	uint32_t redboot_ver_str[8];
+	uint32_t driver_ver_str[8];
+	uint32_t flash_fw_ver_str[8];
+	uint32_t functionality;
+	uint32_t word105;
+#define lpfc_cntl_attr_max_cbd_len_SHIFT	0
+#define lpfc_cntl_attr_max_cbd_len_MASK		0x0000ffff
+#define lpfc_cntl_attr_max_cbd_len_WORD		word105
+#define lpfc_cntl_attr_asic_rev_SHIFT		16
+#define lpfc_cntl_attr_asic_rev_MASK		0x000000ff
+#define lpfc_cntl_attr_asic_rev_WORD		word105
+#define lpfc_cntl_attr_gen_guid0_SHIFT		24
+#define lpfc_cntl_attr_gen_guid0_MASK		0x000000ff
+#define lpfc_cntl_attr_gen_guid0_WORD		word105
+	uint32_t gen_guid1_12[3];
+	uint32_t word109;
+#define lpfc_cntl_attr_gen_guid13_14_SHIFT	0
+#define lpfc_cntl_attr_gen_guid13_14_MASK	0x0000ffff
+#define lpfc_cntl_attr_gen_guid13_14_WORD	word109
+#define lpfc_cntl_attr_gen_guid15_SHIFT		16
+#define lpfc_cntl_attr_gen_guid15_MASK		0x000000ff
+#define lpfc_cntl_attr_gen_guid15_WORD		word109
+#define lpfc_cntl_attr_hba_port_cnt_SHIFT	24
+#define lpfc_cntl_attr_hba_port_cnt_MASK	0x000000ff
+#define lpfc_cntl_attr_hba_port_cnt_WORD	word109
+	uint32_t word110;
+#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT	0
+#define lpfc_cntl_attr_dflt_lnk_tmo_MASK	0x0000ffff
+#define lpfc_cntl_attr_dflt_lnk_tmo_WORD	word110
+#define lpfc_cntl_attr_multi_func_dev_SHIFT	24
+#define lpfc_cntl_attr_multi_func_dev_MASK	0x000000ff
+#define lpfc_cntl_attr_multi_func_dev_WORD	word110
+	uint32_t word111;
+#define lpfc_cntl_attr_cache_valid_SHIFT	0
+#define lpfc_cntl_attr_cache_valid_MASK		0x000000ff
+#define lpfc_cntl_attr_cache_valid_WORD		word111
+#define lpfc_cntl_attr_hba_status_SHIFT		8
+#define lpfc_cntl_attr_hba_status_MASK		0x000000ff
+#define lpfc_cntl_attr_hba_status_WORD		word111
+#define lpfc_cntl_attr_max_domain_SHIFT		16
+#define lpfc_cntl_attr_max_domain_MASK		0x000000ff
+#define lpfc_cntl_attr_max_domain_WORD		word111
+#define lpfc_cntl_attr_lnk_numb_SHIFT		24
+#define lpfc_cntl_attr_lnk_numb_MASK		0x0000003f
+#define lpfc_cntl_attr_lnk_numb_WORD		word111
+#define lpfc_cntl_attr_lnk_type_SHIFT		30
+#define lpfc_cntl_attr_lnk_type_MASK		0x00000003
+#define lpfc_cntl_attr_lnk_type_WORD		word111
+	uint32_t fw_post_status;
+	uint32_t hba_mtu[8];
+	uint32_t word121;
+	uint32_t reserved1[3];
+	uint32_t word125;
+#define lpfc_cntl_attr_pci_vendor_id_SHIFT	0
+#define lpfc_cntl_attr_pci_vendor_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_vendor_id_WORD	word125
+#define lpfc_cntl_attr_pci_device_id_SHIFT	16
+#define lpfc_cntl_attr_pci_device_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_device_id_WORD	word125
+	uint32_t word126;
+#define lpfc_cntl_attr_pci_subvdr_id_SHIFT	0
+#define lpfc_cntl_attr_pci_subvdr_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_subvdr_id_WORD	word126
+#define lpfc_cntl_attr_pci_subsys_id_SHIFT	16
+#define lpfc_cntl_attr_pci_subsys_id_MASK	0x0000ffff
+#define lpfc_cntl_attr_pci_subsys_id_WORD	word126
+	uint32_t word127;
+#define lpfc_cntl_attr_pci_bus_num_SHIFT	0
+#define lpfc_cntl_attr_pci_bus_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_bus_num_WORD		word127
+#define lpfc_cntl_attr_pci_dev_num_SHIFT	8
+#define lpfc_cntl_attr_pci_dev_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_dev_num_WORD		word127
+#define lpfc_cntl_attr_pci_fnc_num_SHIFT	16
+#define lpfc_cntl_attr_pci_fnc_num_MASK		0x000000ff
+#define lpfc_cntl_attr_pci_fnc_num_WORD		word127
+#define lpfc_cntl_attr_inf_type_SHIFT		24
+#define lpfc_cntl_attr_inf_type_MASK		0x000000ff
+#define lpfc_cntl_attr_inf_type_WORD		word127
+	uint32_t unique_id[2];
+	uint32_t word130;
+#define lpfc_cntl_attr_num_netfil_SHIFT		0
+#define lpfc_cntl_attr_num_netfil_MASK		0x000000ff
+#define lpfc_cntl_attr_num_netfil_WORD		word130
+	uint32_t reserved2[4];
+};
+
+struct lpfc_mbx_get_cntl_attributes {
+	union  lpfc_sli4_cfg_shdr cfg_shdr;
+	struct lpfc_controller_attribute cntl_attr;
+};
+
+struct lpfc_mbx_get_port_name {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_port_name_lnk_type_SHIFT	0
+#define lpfc_mbx_get_port_name_lnk_type_MASK	0x00000003
+#define lpfc_mbx_get_port_name_lnk_type_WORD	word4
+		} request;
+		struct {
+			uint32_t word4;
+#define lpfc_mbx_get_port_name_name0_SHIFT	0
+#define lpfc_mbx_get_port_name_name0_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name0_WORD	word4
+#define lpfc_mbx_get_port_name_name1_SHIFT	8
+#define lpfc_mbx_get_port_name_name1_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name1_WORD	word4
+#define lpfc_mbx_get_port_name_name2_SHIFT	16
+#define lpfc_mbx_get_port_name_name2_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name2_WORD	word4
+#define lpfc_mbx_get_port_name_name3_SHIFT	24
+#define lpfc_mbx_get_port_name_name3_MASK	0x000000FF
+#define lpfc_mbx_get_port_name_name3_WORD	word4
+#define LPFC_LINK_NUMBER_0			0
+#define LPFC_LINK_NUMBER_1			1
+#define LPFC_LINK_NUMBER_2			2
+#define LPFC_LINK_NUMBER_3			3
+		} response;
+	} u;
+};
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS			0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES	0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER		0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES	0x3
+#define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
+#define MB_CQE_STATUS_DMA_FAILED		0x5
+
+#define LPFC_MBX_WR_CONFIG_MAX_BDE		8
+struct lpfc_mbx_wr_object {
+	struct mbox_header header;
+	union {
+		struct {
+			uint32_t word4;
+#define lpfc_wr_object_eof_SHIFT		31
+#define lpfc_wr_object_eof_MASK			0x00000001
+#define lpfc_wr_object_eof_WORD			word4
+#define lpfc_wr_object_write_length_SHIFT	0
+#define lpfc_wr_object_write_length_MASK	0x00FFFFFF
+#define lpfc_wr_object_write_length_WORD	word4
+			uint32_t write_offset;
+			uint32_t object_name[26];
+			uint32_t bde_count;
+			struct ulp_bde64 bde[LPFC_MBX_WR_CONFIG_MAX_BDE];
+		} request;
+		struct {
+			uint32_t actual_write_length;
+		} response;
+	} u;
+};
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+	uint32_t word0;
+#define lpfc_mqe_status_SHIFT		16
+#define lpfc_mqe_status_MASK		0x0000FFFF
+#define lpfc_mqe_status_WORD		word0
+#define lpfc_mqe_command_SHIFT		8
+#define lpfc_mqe_command_MASK		0x000000FF
+#define lpfc_mqe_command_WORD		word0
+	union {
+		uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+		/* sli4 mailbox commands */
+		struct lpfc_mbx_sli4_config sli4_config;
+		struct lpfc_mbx_init_vfi init_vfi;
+		struct lpfc_mbx_reg_vfi reg_vfi;
+		struct lpfc_mbx_reg_vfi unreg_vfi;
+		struct lpfc_mbx_init_vpi init_vpi;
+		struct lpfc_mbx_resume_rpi resume_rpi;
+		struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+		struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+		struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+		struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
+		struct lpfc_mbx_reg_fcfi reg_fcfi;
+		struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+		struct lpfc_mbx_mq_create mq_create;
+		struct lpfc_mbx_mq_create_ext mq_create_ext;
+		struct lpfc_mbx_eq_create eq_create;
+		struct lpfc_mbx_cq_create cq_create;
+		struct lpfc_mbx_wq_create wq_create;
+		struct lpfc_mbx_rq_create rq_create;
+		struct lpfc_mbx_mq_destroy mq_destroy;
+		struct lpfc_mbx_eq_destroy eq_destroy;
+		struct lpfc_mbx_cq_destroy cq_destroy;
+		struct lpfc_mbx_wq_destroy wq_destroy;
+		struct lpfc_mbx_rq_destroy rq_destroy;
+		struct lpfc_mbx_get_rsrc_extent_info rsrc_extent_info;
+		struct lpfc_mbx_alloc_rsrc_extents alloc_rsrc_extents;
+		struct lpfc_mbx_dealloc_rsrc_extents dealloc_rsrc_extents;
+		struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+		struct lpfc_mbx_nembed_cmd nembed_cmd;
+		struct lpfc_mbx_read_rev read_rev;
+		struct lpfc_mbx_read_vpi read_vpi;
+		struct lpfc_mbx_read_config rd_config;
+		struct lpfc_mbx_request_features req_ftrs;
+		struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+		struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+		struct lpfc_mbx_supp_pages supp_pages;
+		struct lpfc_mbx_pc_sli4_params sli4_params;
+		struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
+		struct lpfc_mbx_set_link_diag_state link_diag_state;
+		struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
+		struct lpfc_mbx_run_link_diag_test link_diag_test;
+		struct lpfc_mbx_get_func_cfg get_func_cfg;
+		struct lpfc_mbx_get_prof_cfg get_prof_cfg;
+		struct lpfc_mbx_wr_object wr_object;
+		struct lpfc_mbx_get_port_name get_port_name;
+		struct lpfc_mbx_nop nop;
+	} un;
+};
+
+struct lpfc_mcqe {
+	uint32_t word0;
+#define lpfc_mcqe_status_SHIFT		0
+#define lpfc_mcqe_status_MASK		0x0000FFFF
+#define lpfc_mcqe_status_WORD		word0
+#define lpfc_mcqe_ext_status_SHIFT	16
+#define lpfc_mcqe_ext_status_MASK  	0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD 	word0
+	uint32_t mcqe_tag0;
+	uint32_t mcqe_tag1;
+	uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT	31
+#define lpfc_trailer_valid_MASK		0x00000001
+#define lpfc_trailer_valid_WORD		trailer
+#define lpfc_trailer_async_SHIFT	30
+#define lpfc_trailer_async_MASK		0x00000001
+#define lpfc_trailer_async_WORD		trailer
+#define lpfc_trailer_hpi_SHIFT		29
+#define lpfc_trailer_hpi_MASK		0x00000001
+#define lpfc_trailer_hpi_WORD		trailer
+#define lpfc_trailer_completed_SHIFT	28
+#define lpfc_trailer_completed_MASK	0x00000001
+#define lpfc_trailer_completed_WORD	trailer
+#define lpfc_trailer_consumed_SHIFT	27
+#define lpfc_trailer_consumed_MASK	0x00000001
+#define lpfc_trailer_consumed_WORD	trailer
+#define lpfc_trailer_type_SHIFT		16
+#define lpfc_trailer_type_MASK		0x000000FF
+#define lpfc_trailer_type_WORD		trailer
+#define lpfc_trailer_code_SHIFT		8
+#define lpfc_trailer_code_MASK		0x000000FF
+#define lpfc_trailer_code_WORD		trailer
+#define LPFC_TRAILER_CODE_LINK	0x1
+#define LPFC_TRAILER_CODE_FCOE	0x2
+#define LPFC_TRAILER_CODE_DCBX	0x3
+#define LPFC_TRAILER_CODE_GRP5	0x5
+#define LPFC_TRAILER_CODE_FC	0x10
+#define LPFC_TRAILER_CODE_SLI	0x11
+};
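+
+/*
+ * A sketch of how the trailer word above is typically examined with the
+ * bit-field helpers (not a complete event handler; handle_async_link_event
+ * is only a hypothetical consumer):
+ *
+ *	struct lpfc_mcqe mcqe;
+ *
+ *	if (bf_get(lpfc_trailer_async, &mcqe) &&
+ *	    bf_get(lpfc_trailer_code, &mcqe) == LPFC_TRAILER_CODE_LINK)
+ *		handle_async_link_event();
+ */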
+
+struct lpfc_acqe_link {
+	uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT		24
+#define lpfc_acqe_link_speed_MASK		0x000000FF
+#define lpfc_acqe_link_speed_WORD		word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO		0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS		0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS		0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS		0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS		0x4
+#define lpfc_acqe_link_duplex_SHIFT		16
+#define lpfc_acqe_link_duplex_MASK		0x000000FF
+#define lpfc_acqe_link_duplex_WORD		word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE		0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF		0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL		0x2
+#define lpfc_acqe_link_status_SHIFT		8
+#define lpfc_acqe_link_status_MASK		0x000000FF
+#define lpfc_acqe_link_status_WORD		word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN		0x0
+#define LPFC_ASYNC_LINK_STATUS_UP		0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
+#define lpfc_acqe_link_type_SHIFT		6
+#define lpfc_acqe_link_type_MASK		0x00000003
+#define lpfc_acqe_link_type_WORD		word0
+#define lpfc_acqe_link_number_SHIFT		0
+#define lpfc_acqe_link_number_MASK		0x0000003F
+#define lpfc_acqe_link_number_WORD		word0
+	uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT	0
+#define lpfc_acqe_link_fault_MASK	0x000000FF
+#define lpfc_acqe_link_fault_WORD	word1
+#define LPFC_ASYNC_LINK_FAULT_NONE	0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
+#define lpfc_acqe_logical_link_speed_SHIFT	16
+#define lpfc_acqe_logical_link_speed_MASK	0x0000FFFF
+#define lpfc_acqe_logical_link_speed_WORD	word1
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_LINK_EVENT_TYPE_PHYSICAL	0x0
+#define LPFC_LINK_EVENT_TYPE_VIRTUAL	0x1
+};
+
+struct lpfc_acqe_fip {
+	uint32_t index;
+	uint32_t word1;
+#define lpfc_acqe_fip_fcf_count_SHIFT		0
+#define lpfc_acqe_fip_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fip_fcf_count_WORD		word1
+#define lpfc_acqe_fip_event_type_SHIFT		16
+#define lpfc_acqe_fip_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fip_event_type_WORD		word1
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FIP_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FIP_EVENT_TYPE_FCF_DEAD		0x3
+#define LPFC_FIP_EVENT_TYPE_CVL			0x4
+#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD	0x5
+};
+
+struct lpfc_acqe_dcbx {
+	uint32_t tlv_ttl;
+	uint32_t reserved;
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_grp5 {
+	uint32_t word0;
+#define lpfc_acqe_grp5_type_SHIFT		6
+#define lpfc_acqe_grp5_type_MASK		0x00000003
+#define lpfc_acqe_grp5_type_WORD		word0
+#define lpfc_acqe_grp5_number_SHIFT		0
+#define lpfc_acqe_grp5_number_MASK		0x0000003F
+#define lpfc_acqe_grp5_number_WORD		word0
+	uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT	16
+#define lpfc_acqe_grp5_llink_spd_MASK	0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD	word1
+	uint32_t event_tag;
+	uint32_t trailer;
+};
+
+struct lpfc_acqe_fc_la {
+	uint32_t word0;
+#define lpfc_acqe_fc_la_speed_SHIFT		24
+#define lpfc_acqe_fc_la_speed_MASK		0x000000FF
+#define lpfc_acqe_fc_la_speed_WORD		word0
+#define LPFC_FC_LA_SPEED_UNKOWN		0x0
+#define LPFC_FC_LA_SPEED_1G		0x1
+#define LPFC_FC_LA_SPEED_2G		0x2
+#define LPFC_FC_LA_SPEED_4G		0x4
+#define LPFC_FC_LA_SPEED_8G		0x8
+#define LPFC_FC_LA_SPEED_10G		0xA
+#define LPFC_FC_LA_SPEED_16G		0x10
+#define lpfc_acqe_fc_la_topology_SHIFT		16
+#define lpfc_acqe_fc_la_topology_MASK		0x000000FF
+#define lpfc_acqe_fc_la_topology_WORD		word0
+#define LPFC_FC_LA_TOP_UNKOWN		0x0
+#define LPFC_FC_LA_TOP_P2P		0x1
+#define LPFC_FC_LA_TOP_FCAL		0x2
+#define LPFC_FC_LA_TOP_INTERNAL_LOOP	0x3
+#define LPFC_FC_LA_TOP_SERDES_LOOP	0x4
+#define lpfc_acqe_fc_la_att_type_SHIFT		8
+#define lpfc_acqe_fc_la_att_type_MASK		0x000000FF
+#define lpfc_acqe_fc_la_att_type_WORD		word0
+#define LPFC_FC_LA_TYPE_LINK_UP		0x1
+#define LPFC_FC_LA_TYPE_LINK_DOWN	0x2
+#define LPFC_FC_LA_TYPE_NO_HARD_ALPA	0x3
+#define lpfc_acqe_fc_la_port_type_SHIFT		6
+#define lpfc_acqe_fc_la_port_type_MASK		0x00000003
+#define lpfc_acqe_fc_la_port_type_WORD		word0
+#define LPFC_LINK_TYPE_ETHERNET		0x0
+#define LPFC_LINK_TYPE_FC		0x1
+#define lpfc_acqe_fc_la_port_number_SHIFT	0
+#define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
+#define lpfc_acqe_fc_la_port_number_WORD	word0
+	uint32_t word1;
+#define lpfc_acqe_fc_la_llink_spd_SHIFT		16
+#define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
+#define lpfc_acqe_fc_la_llink_spd_WORD		word1
+#define lpfc_acqe_fc_la_fault_SHIFT		0
+#define lpfc_acqe_fc_la_fault_MASK		0x000000FF
+#define lpfc_acqe_fc_la_fault_WORD		word1
+#define LPFC_FC_LA_FAULT_NONE		0x0
+#define LPFC_FC_LA_FAULT_LOCAL		0x1
+#define LPFC_FC_LA_FAULT_REMOTE		0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FC_LA_EVENT_TYPE_FC_LINK		0x1
+#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK	0x2
+};
+
+struct lpfc_acqe_sli {
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t reserved;
+	uint32_t trailer;
+#define LPFC_SLI_EVENT_TYPE_PORT_ERROR		0x1
+#define LPFC_SLI_EVENT_TYPE_OVER_TEMP		0x2
+#define LPFC_SLI_EVENT_TYPE_NORM_TEMP		0x3
+#define LPFC_SLI_EVENT_TYPE_NVLOG_POST		0x4
+#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP		0x5
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox commands between the host and port. The mailbox consists
+ * of a 256-byte payload area followed by a 16-byte completion queue
+ * entry.
+ */
+struct lpfc_bmbx_create {
+	struct lpfc_mqe mqe;
+	struct lpfc_mcqe mcqe;
+};
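+
+/*
+ * Size sketch for the bmbx region described above, assuming
+ * LPFC_SLI4_MB_WORD_COUNT is 64 and that no member of the MQE union is
+ * larger than its mb_words[] view:
+ *
+ *	sizeof(struct lpfc_mqe)  = 64 words * 4 bytes = 256-byte payload area
+ *	sizeof(struct lpfc_mcqe) =  4 words * 4 bytes =  16-byte completion entry
+ */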
+
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+/* align SGL addr on a size boundary - adjust address up */
+#define NO_XRI  0xffff
+
+struct wqe_common {
+	uint32_t word6;
+#define wqe_xri_tag_SHIFT     0
+#define wqe_xri_tag_MASK      0x0000FFFF
+#define wqe_xri_tag_WORD      word6
+#define wqe_ctxt_tag_SHIFT    16
+#define wqe_ctxt_tag_MASK     0x0000FFFF
+#define wqe_ctxt_tag_WORD     word6
+	uint32_t word7;
+#define wqe_dif_SHIFT         0
+#define wqe_dif_MASK          0x00000003
+#define wqe_dif_WORD          word7
+#define wqe_ct_SHIFT          2
+#define wqe_ct_MASK           0x00000003
+#define wqe_ct_WORD           word7
+#define wqe_status_SHIFT      4
+#define wqe_status_MASK       0x0000000f
+#define wqe_status_WORD       word7
+#define wqe_cmnd_SHIFT        8
+#define wqe_cmnd_MASK         0x000000ff
+#define wqe_cmnd_WORD         word7
+#define wqe_class_SHIFT       16
+#define wqe_class_MASK        0x00000007
+#define wqe_class_WORD        word7
+#define wqe_ar_SHIFT          19
+#define wqe_ar_MASK           0x00000001
+#define wqe_ar_WORD           word7
+#define wqe_ag_SHIFT          wqe_ar_SHIFT
+#define wqe_ag_MASK           wqe_ar_MASK
+#define wqe_ag_WORD           wqe_ar_WORD
+#define wqe_pu_SHIFT          20
+#define wqe_pu_MASK           0x00000003
+#define wqe_pu_WORD           word7
+#define wqe_erp_SHIFT         22
+#define wqe_erp_MASK          0x00000001
+#define wqe_erp_WORD          word7
+#define wqe_conf_SHIFT        wqe_erp_SHIFT
+#define wqe_conf_MASK         wqe_erp_MASK
+#define wqe_conf_WORD         wqe_erp_WORD
+#define wqe_lnk_SHIFT         23
+#define wqe_lnk_MASK          0x00000001
+#define wqe_lnk_WORD          word7
+#define wqe_tmo_SHIFT         24
+#define wqe_tmo_MASK          0x000000ff
+#define wqe_tmo_WORD          word7
+	uint32_t abort_tag; /* word 8 in WQE */
+	uint32_t word9;
+#define wqe_reqtag_SHIFT      0
+#define wqe_reqtag_MASK       0x0000FFFF
+#define wqe_reqtag_WORD       word9
+#define wqe_temp_rpi_SHIFT    16
+#define wqe_temp_rpi_MASK     0x0000FFFF
+#define wqe_temp_rpi_WORD     word9
+#define wqe_rcvoxid_SHIFT     16
+#define wqe_rcvoxid_MASK      0x0000FFFF
+#define wqe_rcvoxid_WORD      word9
+	uint32_t word10;
+#define wqe_ebde_cnt_SHIFT    0
+#define wqe_ebde_cnt_MASK     0x0000000f
+#define wqe_ebde_cnt_WORD     word10
+#define wqe_lenloc_SHIFT      7
+#define wqe_lenloc_MASK       0x00000003
+#define wqe_lenloc_WORD       word10
+#define LPFC_WQE_LENLOC_NONE		0
+#define LPFC_WQE_LENLOC_WORD3	1
+#define LPFC_WQE_LENLOC_WORD12	2
+#define LPFC_WQE_LENLOC_WORD4	3
+#define wqe_qosd_SHIFT        9
+#define wqe_qosd_MASK         0x00000001
+#define wqe_qosd_WORD         word10
+#define wqe_xbl_SHIFT         11
+#define wqe_xbl_MASK          0x00000001
+#define wqe_xbl_WORD          word10
+#define wqe_iod_SHIFT         13
+#define wqe_iod_MASK          0x00000001
+#define wqe_iod_WORD          word10
+#define LPFC_WQE_IOD_WRITE	0
+#define LPFC_WQE_IOD_READ	1
+#define wqe_dbde_SHIFT        14
+#define wqe_dbde_MASK         0x00000001
+#define wqe_dbde_WORD         word10
+#define wqe_wqes_SHIFT        15
+#define wqe_wqes_MASK         0x00000001
+#define wqe_wqes_WORD         word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT        1
+#define wqe_wqid_MASK         0x00007fff
+#define wqe_wqid_WORD         word10
+#define wqe_pri_SHIFT         16
+#define wqe_pri_MASK          0x00000007
+#define wqe_pri_WORD          word10
+#define wqe_pv_SHIFT          19
+#define wqe_pv_MASK           0x00000001
+#define wqe_pv_WORD           word10
+#define wqe_xc_SHIFT          21
+#define wqe_xc_MASK           0x00000001
+#define wqe_xc_WORD           word10
+#define wqe_sr_SHIFT          22
+#define wqe_sr_MASK           0x00000001
+#define wqe_sr_WORD           word10
+#define wqe_ccpe_SHIFT        23
+#define wqe_ccpe_MASK         0x00000001
+#define wqe_ccpe_WORD         word10
+#define wqe_ccp_SHIFT         24
+#define wqe_ccp_MASK          0x000000ff
+#define wqe_ccp_WORD          word10
+	uint32_t word11;
+#define wqe_cmd_type_SHIFT    0
+#define wqe_cmd_type_MASK     0x0000000f
+#define wqe_cmd_type_WORD     word11
+#define wqe_els_id_SHIFT      4
+#define wqe_els_id_MASK       0x00000003
+#define wqe_els_id_WORD       word11
+#define LPFC_ELS_ID_FLOGI	3
+#define LPFC_ELS_ID_FDISC	2
+#define LPFC_ELS_ID_LOGO	1
+#define LPFC_ELS_ID_DEFAULT	0
+#define wqe_wqec_SHIFT        7
+#define wqe_wqec_MASK         0x00000001
+#define wqe_wqec_WORD         word11
+#define wqe_cqid_SHIFT        16
+#define wqe_cqid_MASK         0x0000ffff
+#define wqe_cqid_WORD         word11
+#define LPFC_WQE_CQ_ID_DEFAULT	0xffff
+};
+
+struct wqe_did {
+	uint32_t word5;
+#define wqe_els_did_SHIFT         0
+#define wqe_els_did_MASK          0x00FFFFFF
+#define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_pt_SHIFT         28
+#define wqe_xmit_bls_pt_MASK          0x00000003
+#define wqe_xmit_bls_pt_WORD          word5
+#define wqe_xmit_bls_ar_SHIFT         30
+#define wqe_xmit_bls_ar_MASK          0x00000001
+#define wqe_xmit_bls_ar_WORD          word5
+#define wqe_xmit_bls_xo_SHIFT         31
+#define wqe_xmit_bls_xo_MASK          0x00000001
+#define wqe_xmit_bls_xo_WORD          word5
+};
+
+struct lpfc_wqe_generic {
+	struct ulp_bde64 bde;
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5;
+	struct wqe_common wqe_com;
+	uint32_t payload[4];
+};
+
+struct els_request64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_len;
+	uint32_t word4;
+#define els_req64_sid_SHIFT         0
+#define els_req64_sid_MASK          0x00FFFFFF
+#define els_req64_sid_WORD          word4
+#define els_req64_sp_SHIFT          24
+#define els_req64_sp_MASK           0x00000001
+#define els_req64_sp_WORD           word4
+#define els_req64_vf_SHIFT          25
+#define els_req64_vf_MASK           0x00000001
+#define els_req64_vf_WORD           word4
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define els_req64_vfid_SHIFT        1
+#define els_req64_vfid_MASK         0x00000FFF
+#define els_req64_vfid_WORD         word12
+#define els_req64_pri_SHIFT         13
+#define els_req64_pri_MASK          0x00000007
+#define els_req64_pri_WORD          word12
+	uint32_t word13;
+#define els_req64_hopcnt_SHIFT      24
+#define els_req64_hopcnt_MASK       0x000000ff
+#define els_req64_hopcnt_WORD       word13
+	uint32_t reserved[2];
+};
+
+struct xmit_els_rsp64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t response_payload_len;
+	uint32_t rsvd4;
+	struct wqe_did wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define wqe_rsp_temp_rpi_SHIFT    0
+#define wqe_rsp_temp_rpi_MASK     0x0000FFFF
+#define wqe_rsp_temp_rpi_WORD     word12
+	uint32_t rsvd_13_15[3];
+};
+
+struct xmit_bls_rsp64_wqe {
+	uint32_t payload0;
+/* Payload0 for BA_ACC */
+#define xmit_bls_rsp64_acc_seq_id_SHIFT        16
+#define xmit_bls_rsp64_acc_seq_id_MASK         0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_WORD         payload0
+#define xmit_bls_rsp64_acc_seq_id_vald_SHIFT   24
+#define xmit_bls_rsp64_acc_seq_id_vald_MASK    0x000000ff
+#define xmit_bls_rsp64_acc_seq_id_vald_WORD    payload0
+/* Payload0 for BA_RJT */
+#define xmit_bls_rsp64_rjt_vspec_SHIFT   0
+#define xmit_bls_rsp64_rjt_vspec_MASK    0x000000ff
+#define xmit_bls_rsp64_rjt_vspec_WORD    payload0
+#define xmit_bls_rsp64_rjt_expc_SHIFT    8
+#define xmit_bls_rsp64_rjt_expc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_expc_WORD     payload0
+#define xmit_bls_rsp64_rjt_rsnc_SHIFT    16
+#define xmit_bls_rsp64_rjt_rsnc_MASK     0x000000ff
+#define xmit_bls_rsp64_rjt_rsnc_WORD     payload0
+	uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT  0
+#define xmit_bls_rsp64_rxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD   word1
+#define xmit_bls_rsp64_oxid_SHIFT  16
+#define xmit_bls_rsp64_oxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD   word1
+	uint32_t word2;
+#define xmit_bls_rsp64_seqcnthi_SHIFT  0
+#define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD   word2
+#define xmit_bls_rsp64_seqcntlo_SHIFT  16
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
+	uint32_t rsrvd3;
+	uint32_t rsrvd4;
+	struct wqe_did	wqe_dest;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t word12;
+#define xmit_bls_rsp64_temprpi_SHIFT  0
+#define xmit_bls_rsp64_temprpi_MASK   0x0000ffff
+#define xmit_bls_rsp64_temprpi_WORD   word12
+	uint32_t rsvd_13_15[3];
+};
+
+struct wqe_rctl_dfctl {
+	uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK  0x000000001
+#define wqe_si_WORD  word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK  0x000000001
+#define wqe_la_WORD  word5
+#define wqe_xo_SHIFT	6
+#define wqe_xo_MASK	0x000000001
+#define wqe_xo_WORD	word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK  0x000000001
+#define wqe_ls_WORD  word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK  0x0000000ff
+#define wqe_dfctl_WORD  word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK  0x0000000ff
+#define wqe_type_WORD  word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK  0x0000000ff
+#define wqe_rctl_WORD  word5
+};
+
+struct xmit_seq64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t rsvd3;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl;
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t xmit_len;
+	uint32_t rsvd_12_15[3];
+};
+struct xmit_bcast64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t seq_payload_len;
+	uint32_t rsvd4;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t request_payload_len;
+	uint32_t relative_offset;
+	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];
+};
+
+struct create_xri_wqe {
+	uint32_t rsrvd[5];           /* words 0-4 */
+	struct wqe_did	wqe_dest;  /* word 5 */
+	struct wqe_common wqe_com; /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+	uint32_t rsrvd[3];
+	uint32_t word3;
+#define	abort_cmd_ia_SHIFT  0
+#define	abort_cmd_ia_MASK  0x000000001
+#define	abort_cmd_ia_WORD  word3
+#define	abort_cmd_criteria_SHIFT  8
+#define	abort_cmd_criteria_MASK  0x0000000ff
+#define	abort_cmd_criteria_WORD  word3
+	uint32_t rsrvd4;
+	uint32_t rsrvd5;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset_len;
+	uint32_t total_xfer_len;
+	uint32_t initial_xfer_len;
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
+};
+
+struct fcp_iread64_wqe {
+	struct ulp_bde64 bde;
+	uint32_t payload_offset_len;   /* word 3 */
+	uint32_t total_xfer_len;       /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsrvd12;
+	struct ulp_bde64 ph_bde;       /* words 13-15 */
+};
+
+struct fcp_icmnd64_wqe {
+	struct ulp_bde64 bde;          /* words 0-2 */
+	uint32_t rsrvd3;               /* word 3 */
+	uint32_t rsrvd4;               /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
+	struct wqe_common wqe_com;     /* words 6-11 */
+	uint32_t rsvd_12_15[4];        /* word 12-15 */
+};
+
+union lpfc_wqe {
+	uint32_t words[16];
+	struct lpfc_wqe_generic generic;
+	struct fcp_icmnd64_wqe fcp_icmd;
+	struct fcp_iread64_wqe fcp_iread;
+	struct fcp_iwrite64_wqe fcp_iwrite;
+	struct abort_cmd_wqe abort_cmd;
+	struct create_xri_wqe create_xri;
+	struct xmit_bcast64_wqe xmit_bcast64;
+	struct xmit_seq64_wqe xmit_sequence;
+	struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+	struct xmit_els_rsp64_wqe xmit_els_rsp;
+	struct els_request64_wqe els_req;
+	struct gen_req64_wqe gen_req;
+};
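+
+/*
+ * The words[] member shows that a WQE is 16 32-bit words (64 bytes); the
+ * typed views above overlay that same storage.  A hedged sketch of filling
+ * the common fields through the generic view (iotag is only a placeholder
+ * for the driver's I/O tag):
+ *
+ *	union lpfc_wqe wqe;
+ *
+ *	memset(&wqe, 0, sizeof(wqe));
+ *	bf_set(wqe_reqtag, &wqe.generic.wqe_com, iotag);
+ *	bf_set(wqe_cmd_type, &wqe.generic.wqe_com, OTHER_COMMAND);
+ *	bf_set(wqe_cqid, &wqe.generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ */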
+
+#define LPFC_GROUP_OJECT_MAGIC_NUM		0xfeaa0001
+#define LPFC_FILE_TYPE_GROUP			0xf7
+#define LPFC_FILE_ID_GROUP			0xa2
+struct lpfc_grp_hdr {
+	uint32_t size;
+	uint32_t magic_number;
+	uint32_t word2;
+#define lpfc_grp_hdr_file_type_SHIFT	24
+#define lpfc_grp_hdr_file_type_MASK	0x000000FF
+#define lpfc_grp_hdr_file_type_WORD	word2
+#define lpfc_grp_hdr_id_SHIFT		16
+#define lpfc_grp_hdr_id_MASK		0x000000FF
+#define lpfc_grp_hdr_id_WORD		word2
+	uint8_t rev_name[128];
+	uint8_t date[12];
+	uint8_t revision[32];
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
+#define LPFC_FW_DUMP	1
+#define LPFC_FW_RESET	2
+#define LPFC_DV_RESET	3
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_init.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_init.c
new file mode 100644
index 0000000..9598fdc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_init.c
@@ -0,0 +1,10329 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/aer.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/miscdevice.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+char *_dump_buf_data;
+unsigned long _dump_buf_data_order;
+char *_dump_buf_dif;
+unsigned long _dump_buf_dif_order;
+spinlock_t _dump_buf_lock;
+
+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_verify(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_sgl_list(struct lpfc_hba *);
+static int lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
+
+static struct scsi_transport_template *lpfc_transport_template = NULL;
+static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
+static DEFINE_IDR(lpfc_hba_index);
+
+/**
+ * lpfc_config_port_prep - Perform lpfc initialization prior to config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
+ * mailbox command. It retrieves the revision information from the HBA and
+ * collects the Vital Product Data (VPD) needed to configure the HBA.
+ *
+ * Return codes:
+ *   0 - success.
+ *   -ERESTART - requests the SLI layer to reset the HBA and try again.
+ *   Any other value - indicates an error.
+ **/
+int
+lpfc_config_port_prep(struct lpfc_hba *phba)
+{
+	lpfc_vpd_t *vp = &phba->vpd;
+	int i = 0, rc;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	char *lpfc_vpd_data = NULL;
+	uint16_t offset = 0;
+	static char licensed[56] =
+		    "key unlock for use with gnu public licensed code only\0";
+	static int init_key = 1;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	mb = &pmb->u.mb;
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+
+	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+		if (init_key) {
+			uint32_t *ptext = (uint32_t *) licensed;
+
+			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+				*ptext = cpu_to_be32(*ptext);
+			init_key = 0;
+		}
+
+		lpfc_read_nv(phba, pmb);
+		memset((char*)mb->un.varRDnvp.rsvd3, 0,
+			sizeof (mb->un.varRDnvp.rsvd3));
+		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
+			 sizeof (licensed));
+
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+					"0324 Config Port initialization "
+					"error, mbxCmd x%x READ_NVPARM, "
+					"mbxStatus x%x\n",
+					mb->mbxCommand, mb->mbxStatus);
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -ERESTART;
+		}
+		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
+		       sizeof(phba->wwnn));
+		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
+		       sizeof(phba->wwpn));
+	}
+
+	phba->sli3_options = 0x0;
+
+	/* Setup and issue mailbox READ REV command */
+	lpfc_read_rev(phba, pmb);
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0439 Adapter failed to init, mbxCmd x%x "
+				"READ_REV, mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		mempool_free( pmb, phba->mbox_mem_pool);
+		return -ERESTART;
+	}
+
+	/*
+	 * The value of rr must be 1 since the driver set the cv field to 1.
+	 * This setting requires the FW to set all revision fields.
+	 */
+	if (mb->un.varRdRev.rr == 0) {
+		vp->rev.rBit = 0;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0440 Adapter failed to init, READ_REV has "
+				"missing revision information.\n");
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ERESTART;
+	}
+
+	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EINVAL;
+	}
+
+	/* Save information as VPD data */
+	vp->rev.rBit = 1;
+	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
+	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
+	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
+	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
+	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
+	vp->rev.biuRev = mb->un.varRdRev.biuRev;
+	vp->rev.smRev = mb->un.varRdRev.smRev;
+	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
+	vp->rev.endecRev = mb->un.varRdRev.endecRev;
+	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
+	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
+	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
+	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
+	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
+	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+
+	/* If the SLI feature level is less than 9, we must
+	 * tear down all RPIs and VPIs on link down if NPIV
+	 * is enabled.
+	 */
+	if (vp->rev.feaLevelHigh < 9)
+		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
+
+	if (lpfc_is_LC_HBA(phba->pcidev->device))
+		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
+						sizeof (phba->RandomData));
+
+	/* Get adapter VPD information */
+	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
+	if (!lpfc_vpd_data)
+		goto out_free_mbox;
+	do {
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0441 VPD not present on adapter, "
+					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
+					mb->mbxCommand, mb->mbxStatus);
+			mb->un.varDmp.word_cnt = 0;
+		}
+		/* Dump mem may return zero when finished, or we got a
+		 * mailbox error; either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
+		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
+			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				      lpfc_vpd_data + offset,
+				      mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
+
+	kfree(lpfc_vpd_data);
+out_free_mbox:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return 0;
+}
+
+/**
+ * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the driver's asynchronous event
+ * configuration mailbox command to the device. If the mailbox command
+ * completes successfully, the temperature sensor support flag is set
+ * to 1; otherwise it is set to 0.
+ **/
+static void
+lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
+		phba->temp_sensor_support = 1;
+	else
+		phba->temp_sensor_support = 0;
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the dump mailbox command used to get
+ * wake-up parameters. When this command completes, the response contains
+ * the Option ROM version of the HBA. This function translates the version
+ * number into a human-readable string and stores it in OptionROMVersion.
+ **/
+static void
+lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	struct prog_id *prg;
+	uint32_t prog_id_word;
+	char dist = ' ';
+	/* character array used for decoding dist type. */
+	char dist_char[] = "nabx";
+
+	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+		return;
+	}
+
+	prg = (struct prog_id *) &prog_id_word;
+
+	/* word 7 contains the option ROM version */
+	prog_id_word = pmboxq->u.mb.un.varWords[7];
+
+	/* Decode the Option rom version word to a readable string */
+	if (prg->dist < 4)
+		dist = dist_char[prg->dist];
+
+	if ((prg->dist == 3) && (prg->num == 0))
+		sprintf(phba->OptionROMVersion, "%d.%d%d",
+			prg->ver, prg->rev, prg->lev);
+	else
+		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
+			prg->ver, prg->rev, prg->lev,
+			dist, prg->num);
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
+ *	cfg_soft_wwnn, cfg_soft_wwpn
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * Return codes
+ *   None.
+ **/
+void
+lpfc_update_vport_wwn(struct lpfc_vport *vport)
+{
+	/* If the soft name exists then update it using the service params */
+	if (vport->phba->cfg_soft_wwnn)
+		u64_to_wwn(vport->phba->cfg_soft_wwnn,
+			   vport->fc_sparam.nodeName.u.wwn);
+	if (vport->phba->cfg_soft_wwpn)
+		u64_to_wwn(vport->phba->cfg_soft_wwpn,
+			   vport->fc_sparam.portName.u.wwn);
+
+	/*
+	 * If the name is empty or there exists a soft name
+	 * then copy the service params name, otherwise use the fc name
+	 */
+	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
+			sizeof(struct lpfc_name));
+
+	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+			sizeof(struct lpfc_name));
+	else
+		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
+			sizeof(struct lpfc_name));
+}
+
+/**
+ * lpfc_config_port_post - Perform lpfc initialization after config port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization after the CONFIG_PORT mailbox
+ * command call. It performs all internal resource and state setups on the
+ * port: posting IOCB buffers, enabling appropriate host interrupt attentions,
+ * setting up ELS ring timers, etc.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_config_port_post(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t status, timeout;
+	int i, j;
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * If the Config port completed correctly the HBA is no
+	 * longer overheated.
+	 */
+	if (phba->over_temp_state == HBA_OVER_TEMP)
+		phba->over_temp_state = HBA_NORMAL_TEMP;
+	spin_unlock_irq(&phba->hbalock);
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+
+	/* Get login parameters for NID.  */
+	rc = lpfc_read_sparam(phba, pmb, 0);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
+	pmb->vport = vport;
+	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0448 Adapter failed init, mbxCmd x%x "
+				"READ_SPARM mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		phba->link_state = LPFC_HBA_ERROR;
+		mp = (struct lpfc_dmabuf *) pmb->context1;
+		mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		return -EIO;
+	}
+
+	mp = (struct lpfc_dmabuf *) pmb->context1;
+
+	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	pmb->context1 = NULL;
+	lpfc_update_vport_wwn(vport);
+
+	/* Update the fc_host data structures with new wwn. */
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	fc_host_max_npiv_vports(shost) = phba->max_vpi;
+
+	/* If no serial number in VPD data, use low 6 bytes of WWNN */
+	/* This should be consolidated into parse_vpd ? - mr */
+	if (phba->SerialNumber[0] == 0) {
+		uint8_t *outptr;
+
+		outptr = &vport->fc_nodename.u.s.IEEE[0];
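+		/* Build a 12-character serial number from the 6-byte IEEE
+		 * address: each nibble is mapped to an ASCII character,
+		 * '0'-'9' (0x30 base) for 0-9 and 'a'-'f' (0x61 base)
+		 * for 10-15.
+		 */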
+		for (i = 0; i < 12; i++) {
+			status = *outptr++;
+			j = ((status & 0xf0) >> 4);
+			if (j <= 9)
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x30 + (uint8_t) j);
+			else
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+			i++;
+			j = (status & 0xf);
+			if (j <= 9)
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x30 + (uint8_t) j);
+			else
+				phba->SerialNumber[i] =
+				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
+		}
+	}
+
+	lpfc_read_config(phba, pmb);
+	pmb->vport = vport;
+	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0453 Adapter failed to init, mbxCmd x%x "
+				"READ_CONFIG, mbxStatus x%x\n",
+				mb->mbxCommand, mb->mbxStatus);
+		phba->link_state = LPFC_HBA_ERROR;
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+
+	/* Check if the port is disabled */
+	lpfc_sli_read_link_ste(phba);
+
+	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
+	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
+		phba->cfg_hba_queue_depth =
+			(mb->un.varRdConfig.max_xri + 1) -
+					lpfc_sli4_get_els_iocb_cnt(phba);
+
+	phba->lmt = mb->un.varRdConfig.lmt;
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	phba->link_state = LPFC_LINK_DOWN;
+
+	/* Only process IOCBs on ELS ring till hba_state is READY */
+	if (psli->ring[psli->extra_ring].cmdringaddr)
+		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
+	if (psli->ring[psli->fcp_ring].cmdringaddr)
+		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
+	if (psli->ring[psli->next_ring].cmdringaddr)
+		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Post receive buffers for desired rings */
+	if (phba->sli_rev != 3)
+		lpfc_post_rcv_buf(phba);
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
+	 */
+	if (phba->intr_type == MSIX) {
+		rc = lpfc_config_msi(phba, pmb);
+		if (rc) {
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+					"0352 Config MSI mailbox command "
+					"failed, mbxCmd x%x, mbxStatus x%x\n",
+					pmb->u.mb.mbxCommand,
+					pmb->u.mb.mbxStatus);
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	/* Initialize ERATT handling flag */
+	phba->hba_flag &= ~HBA_ERATT_HANDLED;
+
+	/* Enable appropriate host interrupts */
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		return -EIO;
+	}
+	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
+	if (psli->num_rings > 0)
+		status |= HC_R0INT_ENA;
+	if (psli->num_rings > 1)
+		status |= HC_R1INT_ENA;
+	if (psli->num_rings > 2)
+		status |= HC_R2INT_ENA;
+	if (psli->num_rings > 3)
+		status |= HC_R3INT_ENA;
+
+	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
+	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
+		status &= ~(HC_R0INT_ENA);
+
+	writel(status, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Set up ring-0 (ELS) timer */
+	timeout = phba->fc_ratov * 2;
+	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+	/* Set up heart beat (HB) timer */
+	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	phba->hb_outstanding = 0;
+	phba->last_completion_time = jiffies;
+	/* Set up error attention (ERATT) polling timer */
+	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
+	if (phba->hba_flag & LINK_DISABLED) {
+		lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2598 Adapter Link is disabled.\n");
+		lpfc_down_link(phba, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+			lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2599 Adapter failed to issue DOWN_LINK"
+			" mbox command rc 0x%x\n", rc);
+
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+		if (rc)
+			return rc;
+	}
+	/* MBOX buffer will be freed in mbox compl */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
+	pmb->mbox_cmpl = lpfc_config_async_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba,
+				KERN_ERR,
+				LOG_INIT,
+				"0456 Adapter failed to issue "
+				"ASYNCEVT_ENABLE mbox status x%x\n",
+				rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	/* Get Option rom version */
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_dump_wakeup_param(phba, pmb);
+	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+	pmb->vport = phba->pport;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+				"to get Option ROM version status x%x\n", rc);
+		mempool_free(pmb, phba->mbox_mem_pool);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+int
+lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
+{
+	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ *              0 - success
+ *              Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+			       uint32_t flag)
+{
+	struct lpfc_vport *vport = phba->pport;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+	pmb->vport = vport;
+
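+	/* Validate the requested link speed against the link module type
+	 * (lmt) capabilities reported by the adapter; if the speed is not
+	 * supported by this board, fall back to auto speed negotiation.
+	 */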
+	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+	     !(phba->lmt & LMT_1Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+	     !(phba->lmt & LMT_2Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+	     !(phba->lmt & LMT_4Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+	     !(phba->lmt & LMT_8Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+	     !(phba->lmt & LMT_10Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+	     !(phba->lmt & LMT_16Gb))) {
+		/* Reset link speed to auto */
+		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+			"1302 Invalid speed for this board:%d "
+			"Reset link speed to auto.\n",
+			phba->cfg_link_speed);
+		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+	}
+	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		lpfc_set_loopback_flag(phba);
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0498 Adapter failed to init, mbxCmd x%x "
+			"INIT_LINK, mbxStatus x%x\n",
+			mb->mbxCommand, mb->mbxStatus);
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+			/* Clear all pending interrupts */
+			writel(0xffffffff, phba->HAregaddr);
+			readl(phba->HAregaddr); /* flush */
+		}
+		phba->link_state = LPFC_HBA_ERROR;
+		if (rc != MBX_BUSY || flag == MBX_POLL)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_link - this routine downs the FC link
+ * @phba: pointer to lpfc hba data structure.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use to stop the link.
+ *
+ * Return code
+ *		0 - success
+ *		Any other value - error
+ **/
+int
+lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"0491 Adapter Link is disabled.\n");
+	lpfc_down_link(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
+	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+		lpfc_printf_log(phba,
+		KERN_ERR, LOG_INIT,
+		"2522 Adapter failed to issue DOWN_LINK"
+		" mbox command rc 0x%x\n", rc);
+
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	if (flag == MBX_POLL)
+		mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do LPFC uninitialization before the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->sli_rev <= LPFC_SLI_REV3) {
+		/* Disable interrupts */
+		writel(0, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	if (phba->pport->load_flag & FC_UNLOADING)
+		lpfc_cleanup_discovery_resources(phba->pport);
+	else {
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports != NULL)
+			for (i = 0; i <= phba->max_vports &&
+				vports[i] != NULL; i++)
+				lpfc_cleanup_discovery_resources(vports[i]);
+		lpfc_destroy_vport_work_array(phba, vports);
+	}
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_dmabuf *mp, *next_mp;
+	LIST_HEAD(completions);
+	int i;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+		lpfc_sli_hbqbuf_free_all(phba);
+	else {
+		/* Cleanup preposted buffers on the ELS ring */
+		pring = &psli->ring[LPFC_ELS_RING];
+		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+			list_del(&mp->list);
+			pring->postbufq_cnt--;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+
+		/* At this point in time the HBA is either reset or DOA. Either
+		 * way, nothing should be on txcmplq as it will NEVER complete.
+		 */
+		list_splice_init(&pring->txcmplq, &completions);
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Cancel all the IOCBs from the completions list */
+		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_ABORTED);
+
+		lpfc_sli_abort_iocb_ring(phba, pring);
+		spin_lock_irq(&phba->hbalock);
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb, *psb_next;
+	LIST_HEAD(aborts);
+	int ret;
+	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
+	ret = lpfc_hba_down_post_s3(phba);
+	if (ret)
+		return ret;
+	/* At this point in time the HBA is either reset or DOA. Either
+	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
+	 * on the lpfc_sgl_list so that it can either be freed if the
+	 * driver is unloading or reposted if the driver is restarting
+	 * the port.
+	 */
+	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
+					/* scsi_buf_list */
+	/* abts_sgl_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
+	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+			&phba->sli4_hba.lpfc_sgl_list);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	/* abts_scsi_buf_list_lock required because worker thread uses this
+	 * list.
+	 */
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+			&aborts);
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+		psb->pCmd = NULL;
+		psb->status = IOSTAT_SUCCESS;
+	}
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	return 0;
+}
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+	return (*phba->lpfc_hba_down_post)(phba);
+}
+
+/**
+ * lpfc_hb_timeout - The HBA-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the HBA-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, an HBA timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
+ * be performed in the timeout handler and the HBA timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_hb_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	phba = (struct lpfc_hba *)ptr;
+
+	/* Check for heart beat timeout conditions */
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_HB_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	/* Tell the worker thread there is work to do */
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_rrq_timeout - The RRQ-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the RRQ-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_rrq_handler. Any periodic operations will
+ * be performed in the timeout handler and the RRQ timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_rrq_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	unsigned long iflag;
+
+	phba = (struct lpfc_hba *)ptr;
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+	lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the callback function to the lpfc heart-beat mailbox command.
+ * If configured, the lpfc driver issues the heart-beat mailbox command to
+ * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
+ * heart-beat mailbox command is issued, the driver shall set up heart-beat
+ * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
+ * heart-beat outstanding state. Once the mailbox command comes back and
+ * no error conditions detected, the heart-beat mailbox command timer is
+ * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
+ * state is cleared for the next heart-beat. If the timer expired with the
+ * heart-beat outstanding state set, the driver will put the HBA offline.
+ **/
+static void
+lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+	unsigned long drvr_flag;
+
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	phba->hb_outstanding = 0;
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+	/* Check and reset heart-beat timer if necessary */
+	mempool_free(pmboxq, phba->mbox_mem_pool);
+	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
+		!(phba->link_state == LPFC_HBA_ERROR) &&
+		!(phba->pport->load_flag & FC_UNLOADING))
+		mod_timer(&phba->hb_tmofunc,
+			jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	return;
+}
+
+/**
+ * lpfc_hb_timeout_handler - The HBA-timer timeout handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is the actual HBA-timer timeout handler to be invoked by the worker
+ * thread whenever the HBA timer fired and HBA-timeout event posted. This
+ * handler performs any periodic operations needed for the device. If such a
+ * periodic event has already been attended to either in the interrupt handler
+ * or by processing slow-ring or fast-ring events within the HBA-timer
+ * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
+ * the timer for the next timeout period. If lpfc heart-beat mailbox command
+ * is configured and there is no heart-beat mailbox command outstanding, a
+ * heart-beat mailbox is issued and timer set properly. Otherwise, if there
+ * has been a heart-beat mailbox command outstanding, the HBA shall be put
+ * offline.
+ **/
+void
+lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	LPFC_MBOXQ_t *pmboxq;
+	struct lpfc_dmabuf *buf_ptr;
+	int retval, i;
+	struct lpfc_sli *psli = &phba->sli;
+	LIST_HEAD(completions);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_rcv_seq_check_edtov(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	if ((phba->link_state == LPFC_HBA_ERROR) ||
+		(phba->pport->load_flag & FC_UNLOADING) ||
+		(phba->pport->fc_flag & FC_OFFLINE_MODE))
+		return;
+
+	spin_lock_irq(&phba->pport->work_port_lock);
+
+	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
+		jiffies)) {
+		spin_unlock_irq(&phba->pport->work_port_lock);
+		if (!phba->hb_outstanding)
+			mod_timer(&phba->hb_tmofunc,
+				jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+		else
+			mod_timer(&phba->hb_tmofunc,
+				jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+		return;
+	}
+	spin_unlock_irq(&phba->pport->work_port_lock);
+
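+	/* Free the buffers queued on the elsbuf list if their count has not
+	 * changed since the previous heartbeat interval.
+	 */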
+	if (phba->elsbuf_cnt &&
+		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&phba->elsbuf, &completions);
+		phba->elsbuf_cnt = 0;
+		phba->elsbuf_prev_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (!list_empty(&completions)) {
+			list_remove_head(&completions, buf_ptr,
+				struct lpfc_dmabuf, list);
+			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+			kfree(buf_ptr);
+		}
+	}
+	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
+
+	/* If there is no heart beat outstanding, issue a heartbeat command */
+	if (phba->cfg_enable_hba_heartbeat) {
+		if (!phba->hb_outstanding) {
+			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
+				(list_empty(&psli->mboxq))) {
+				pmboxq = mempool_alloc(phba->mbox_mem_pool,
+							GFP_KERNEL);
+				if (!pmboxq) {
+					mod_timer(&phba->hb_tmofunc,
+						 jiffies +
+						 HZ * LPFC_HB_MBOX_INTERVAL);
+					return;
+				}
+
+				lpfc_heart_beat(phba, pmboxq);
+				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+				pmboxq->vport = phba->pport;
+				retval = lpfc_sli_issue_mbox(phba, pmboxq,
+						MBX_NOWAIT);
+
+				if (retval != MBX_BUSY &&
+					retval != MBX_SUCCESS) {
+					mempool_free(pmboxq,
+							phba->mbox_mem_pool);
+					mod_timer(&phba->hb_tmofunc,
+						jiffies +
+						HZ * LPFC_HB_MBOX_INTERVAL);
+					return;
+				}
+				phba->skipped_hb = 0;
+				phba->hb_outstanding = 1;
+			} else if (time_before_eq(phba->last_completion_time,
+					phba->skipped_hb)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2857 Last completion time not "
+					" updated in %d ms\n",
+					jiffies_to_msecs(jiffies
+						 - phba->last_completion_time));
+			} else
+				phba->skipped_hb = jiffies;
+
+			mod_timer(&phba->hb_tmofunc,
+				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+			return;
+		} else {
+			/*
+			 * If the heart beat timeout handler is called with
+			 * hb_outstanding set, we need to give the hb mailbox
+			 * cmd a chance to complete or time out.
+			 */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0459 Adapter heartbeat still out"
+					"standing:last compl time was %d ms.\n",
+					jiffies_to_msecs(jiffies
+						 - phba->last_completion_time));
+			mod_timer(&phba->hb_tmofunc,
+				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+		}
+	}
+}
+
+/**
+ * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring the HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_offline_eratt(struct lpfc_hba *phba)
+{
+	struct lpfc_sli   *psli = &phba->sli;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_offline_prep(phba);
+
+	lpfc_offline(phba);
+	lpfc_reset_barrier(phba);
+	spin_lock_irq(&phba->hbalock);
+	lpfc_sli_brdreset(phba);
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_hba_down_post(phba);
+	lpfc_sli_brdready(phba, HS_MBRDY);
+	lpfc_unblock_mgmt_io(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+	return;
+}
+
+/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	lpfc_sli4_brdreset(phba);
+	lpfc_hba_down_post(phba);
+	lpfc_sli4_post_status_check(phba);
+	lpfc_unblock_mgmt_io(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+}
+
+/**
+ * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the deferred HBA hardware error
+ * conditions. This type of error is indicated by HBA by setting ER1
+ * and another ER bit in the host status register. The driver will
+ * wait until the ER1 bit clears before handling the error condition.
+ **/
+static void
+lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
+{
+	uint32_t old_host_status = phba->work_hs;
+	struct lpfc_sli_ring  *pring;
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		"0479 Deferred Adapter Hardware Error "
+		"Data: x%x x%x x%x\n",
+		phba->work_hs,
+		phba->work_status[0], phba->work_status[1]);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+
+	/*
+	 * Firmware stops when it has triggered an erratt. That could cause
+	 * I/Os to be dropped by the firmware. Error out the iocbs (I/O) on
+	 * the txcmplq and let the SCSI layer retry them after re-establishing
+	 * the link.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+
+	/*
+	 * There was a firmware error. Take the hba offline and then
+	 * attempt to restart it.
+	 */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+
+	/* Wait for the ER1 bit to clear.*/
+	while (phba->work_hs & HS_FFER1) {
+		msleep(100);
+		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
+			phba->work_hs = UNPLUG_ERR;
+			break;
+		}
+		/* If driver is unloading let the worker thread continue */
+		if (phba->pport->load_flag & FC_UNLOADING) {
+			phba->work_hs = 0;
+			break;
+		}
+	}
+
+	/*
+	 * This is to protect against a race condition in which the
+	 * first write to the host attention register clears the
+	 * host status register.
+	 */
+	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
+		phba->work_hs = old_host_status & ~HS_FFER1;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~DEFER_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+}
+
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+	struct lpfc_board_event_header board_event;
+	struct Scsi_Host *shost;
+
+	board_event.event_type = FC_REG_BOARD_EVENT;
+	board_event.subcategory = LPFC_EVENT_PORTINTERR;
+	shost = lpfc_shost_from_vport(phba->pport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(board_event),
+				  (char *) &board_event,
+				  LPFC_NL_VENDOR_ID);
+}
+
+/**
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the following HBA hardware error
+ * conditions:
+ * 1 - HBA error attention interrupt
+ * 2 - DMA ring index out of range
+ * 3 - Mailbox command came back as unknown
+ **/
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_sli   *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+	uint32_t event_data;
+	unsigned long temperature;
+	struct temp_event temp_event_data;
+	struct Scsi_Host  *shost;
+
+	/* If the pci channel is offline, ignore possible errors,
+	 * since we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev)) {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~DEFER_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* If resets are disabled then leave the HBA alone and return */
+	if (!phba->cfg_enable_hba_reset)
+		return;
+
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	if (phba->hba_flag & DEFER_ERATT)
+		lpfc_handle_deferred_eratt(phba);
+
+	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
+		if (phba->work_hs & HS_FFER6)
+			/* Re-establishing Link */
+			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+					"1301 Re-establishing Link "
+					"Data: x%x x%x x%x\n",
+					phba->work_hs, phba->work_status[0],
+					phba->work_status[1]);
+		if (phba->work_hs & HS_FFER8)
+			/* Device Zeroization */
+			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+					"2861 Host Authentication device "
+					"zeroization Data:x%x x%x x%x\n",
+					phba->work_hs, phba->work_status[0],
+					phba->work_status[1]);
+
+		spin_lock_irq(&phba->hbalock);
+		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+
+		/*
+		 * Firmware stops when it has triggered an erratt with HS_FFER6.
+		 * That could cause I/Os to be dropped by the firmware.
+		 * Error out the iocbs (I/O) on the txcmplq and let the SCSI
+		 * layer retry them after re-establishing the link.
+		 */
+		pring = &psli->ring[psli->fcp_ring];
+		lpfc_sli_abort_iocb_ring(phba, pring);
+
+		/*
+		 * There was a firmware error.  Take the hba offline and then
+		 * attempt to restart it.
+		 */
+		lpfc_offline_prep(phba);
+		lpfc_offline(phba);
+		lpfc_sli_brdrestart(phba);
+		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return;
+		}
+		lpfc_unblock_mgmt_io(phba);
+	} else if (phba->work_hs & HS_CRIT_TEMP) {
+		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		temp_event_data.event_code = LPFC_CRIT_TEMP;
+		temp_event_data.data = (uint32_t)temperature;
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0406 Adapter maximum temperature exceeded "
+				"(%ld), taking this port offline "
+				"Data: x%x x%x x%x\n",
+				temperature, phba->work_hs,
+				phba->work_status[0], phba->work_status[1]);
+
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+					  sizeof(temp_event_data),
+					  (char *) &temp_event_data,
+					  SCSI_NL_VID_TYPE_PCI
+					  | PCI_VENDOR_ID_EMULEX);
+
+		spin_lock_irq(&phba->hbalock);
+		phba->over_temp_state = HBA_OVER_TEMP;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_offline_eratt(phba);
+
+	} else {
+		/* The if clause above forces this code path when the status
+		 * failure is a value other than FFER6. Do not call the offline
+		 * twice. This is the adapter hardware error path.
+		 */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0457 Adapter Hardware Error "
+				"Data: x%x x%x x%x\n",
+				phba->work_hs,
+				phba->work_status[0], phba->work_status[1]);
+
+		event_data = FC_REG_DUMP_EVENT;
+		shost = lpfc_shost_from_vport(vport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+				sizeof(event_data), (char *) &event_data,
+				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+		lpfc_offline_eratt(phba);
+	}
+	return;
+}
+
+/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	uint32_t event_data;
+	struct Scsi_Host *shost;
+	uint32_t if_type;
+	struct lpfc_register portstat_reg = {0};
+	uint32_t reg_err1, reg_err2;
+	uint32_t uerrlo_reg, uemasklo_reg;
+	uint32_t pci_rd_rc1, pci_rd_rc2;
+	int rc;
+
+	/* If the pci channel is offline, ignore possible errors, since
+	 * we cannot communicate with the pci card anyway.
+	 */
+	if (pci_channel_offline(phba->pcidev))
+		return;
+	/* If resets are disabled then leave the HBA alone and return */
+	if (!phba->cfg_enable_hba_reset)
+		return;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UERRLOregaddr,
+				&uerrlo_reg);
+		pci_rd_rc2 = lpfc_readl(
+				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+				&uemasklo_reg);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+			return;
+		lpfc_sli4_offline_eratt(phba);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		pci_rd_rc1 = lpfc_readl(
+				phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&portstat_reg.word0);
+		/* consider PCI bus read error as pci_channel_offline */
+		if (pci_rd_rc1 == -EIO) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3151 PCI bus read access failure: x%x\n",
+				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
+			return;
+		}
+		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
+			/* TODO: Register for Overtemp async events. */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2889 Port Overtemperature event, "
+				"taking port offline\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->over_temp_state = HBA_OVER_TEMP;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli4_offline_eratt(phba);
+			break;
+		}
+		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3143 Port Down: Firmware Restarted\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3144 Port Down: Debug Dump\n");
+		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"3145 Port Down: Provisioning\n");
+		/*
+		 * On an error status condition, the driver needs to wait for
+		 * the port to become ready before performing a reset.
+		 */
+		rc = lpfc_sli4_pdev_status_reg_wait(phba);
+		if (!rc) {
+			/* need reset: attempt for port recovery */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2887 Reset Needed: Attempting Port "
+					"Recovery...\n");
+			lpfc_offline_prep(phba);
+			lpfc_offline(phba);
+			lpfc_sli_brdrestart(phba);
+			if (lpfc_online(phba) == 0) {
+				lpfc_unblock_mgmt_io(phba);
+				/* don't report event on forced debug dump */
+				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+					return;
+				else
+					break;
+			}
+			/* fall through for not able to recover */
+		}
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3152 Unrecoverable error, bring the port "
+				"offline\n");
+		lpfc_sli4_offline_eratt(phba);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"3123 Report dump event to upper layer\n");
+	/* Send an internal error event to mgmt application */
+	lpfc_board_errevt_to_mgmt(phba);
+
+	event_data = FC_REG_DUMP_EVENT;
+	shost = lpfc_shost_from_vport(vport);
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(event_data), (char *) &event_data,
+				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+	(*phba->lpfc_handle_eratt)(phba);
+}
+
+/**
+ * lpfc_handle_latt - The HBA link event handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked from the worker thread to handle a HBA host
+ * attention link event.
+ **/
+void
+lpfc_handle_latt(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_sli   *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	volatile uint32_t control;
+	struct lpfc_dmabuf *mp;
+	int rc = 0;
+
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		rc = 1;
+		goto lpfc_handle_latt_err_exit;
+	}
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		rc = 2;
+		goto lpfc_handle_latt_free_pmb;
+	}
+
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		rc = 3;
+		goto lpfc_handle_latt_free_mp;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	psli->slistat.link_event++;
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = vport;
+	/* Block ELS IOCBs until we have processed this mbox command */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		rc = 4;
+		goto lpfc_handle_latt_free_mbuf;
+	}
+
+	/* Clear Link Attention in HA REG */
+	spin_lock_irq(&phba->hbalock);
+	writel(HA_LATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+
+lpfc_handle_latt_free_mbuf:
+	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+lpfc_handle_latt_free_mp:
+	kfree(mp);
+lpfc_handle_latt_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+lpfc_handle_latt_err_exit:
+	/* Enable Link attention interrupts */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_PROCESS_LA;
+	control = readl(phba->HCregaddr);
+	control |= HC_LAINT_ENA;
+	writel(control, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* Clear Link Attention in HA REG */
+	writel(HA_LATT, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_linkdown(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
+
+	return;
+}
+
+/**
+ * lpfc_parse_vpd - Parse VPD (Vital Product Data)
+ * @phba: pointer to lpfc hba data structure.
+ * @vpd: pointer to the vital product data.
+ * @len: length of the vital product data in bytes.
+ *
+ * This routine parses the Vital Product Data (VPD). The VPD is treated as
+ * an array of characters. In this routine, the ModelName, ProgramType, and
+ * ModelDesc, etc. fields of the phba data structure will be populated.
+ *
+ * Return codes
+ *   0 - pointer to the VPD passed in is NULL
+ *   1 - success
+ **/
+int
+lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
+{
+	uint8_t lenlo, lenhi;
+	int Length;
+	int i, j;
+	int finished = 0;
+	int index = 0;
+
+	if (!vpd)
+		return 0;
+
+	/* Vital Product */
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0455 Vital Product Data: x%x x%x x%x x%x\n",
+			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
+			(uint32_t) vpd[3]);
+	while (!finished && (index < (len - 4))) {
+		switch (vpd[index]) {
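+		/* VPD resource tags: 0x82/0x91 are length-prefixed sections
+		 * that are skipped, 0x90 is the read-only keyword section
+		 * parsed below, and 0x78 marks the end of the VPD.
+		 */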
+		case 0x82:
+		case 0x91:
+			index += 1;
+			lenlo = vpd[index];
+			index += 1;
+			lenhi = vpd[index];
+			index += 1;
+			i = ((((unsigned short)lenhi) << 8) + lenlo);
+			index += i;
+			break;
+		case 0x90:
+			index += 1;
+			lenlo = vpd[index];
+			index += 1;
+			lenhi = vpd[index];
+			index += 1;
+			Length = ((((unsigned short)lenhi) << 8) + lenlo);
+			if (Length > len - index)
+				Length = len - index;
+			while (Length > 0) {
+			/* Look for Serial Number */
+			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->SerialNumber[j++] = vpd[index++];
+					if (j == 31)
+						break;
+				}
+				phba->SerialNumber[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
+				phba->vpd_flag |= VPD_MODEL_DESC;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ModelDesc[j++] = vpd[index++];
+					if (j == 255)
+						break;
+				}
+				phba->ModelDesc[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
+				phba->vpd_flag |= VPD_MODEL_NAME;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ModelName[j++] = vpd[index++];
+					if (j == 79)
+						break;
+				}
+				phba->ModelName[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
+				phba->vpd_flag |= VPD_PROGRAM_TYPE;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					phba->ProgramType[j++] = vpd[index++];
+					if (j == 255)
+						break;
+				}
+				phba->ProgramType[j] = 0;
+				continue;
+			}
+			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
+				phba->vpd_flag |= VPD_PORT;
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				j = 0;
+				Length -= (3+i);
+				while(i--) {
+					if ((phba->sli_rev == LPFC_SLI_REV4) &&
+					    (phba->sli4_hba.pport_name_sta ==
+					     LPFC_SLI4_PPNAME_GET)) {
+						j++;
+						index++;
+					} else
+						phba->Port[j++] = vpd[index++];
+					if (j == 19)
+						break;
+				}
+				if ((phba->sli_rev != LPFC_SLI_REV4) ||
+				    (phba->sli4_hba.pport_name_sta ==
+				     LPFC_SLI4_PPNAME_NON))
+					phba->Port[j] = 0;
+				continue;
+			}
+			else {
+				index += 2;
+				i = vpd[index];
+				index += 1;
+				index += i;
+				Length -= (3 + i);
+			}
+		}
+		finished = 0;
+		break;
+		case 0x78:
+			finished = 1;
+			break;
+		default:
+			index++;
+			break;
+		}
+	}
+
+	return(1);
+}
+
+/**
+ * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+	lpfc_vpd_t *vp;
+	uint16_t dev_id = phba->pcidev->device;
+	int max_speed;
+	int GE = 0;
+	int oneConnect = 0; /* default is not a oneConnect */
+	struct {
+		char *name;
+		char *bus;
+		char *function;
+	} m = {"<Unknown>", "", ""};
+
+	if (mdp && mdp[0] != '\0'
+		&& descp && descp[0] != '\0')
+		return;
+
+	if (phba->lmt & LMT_16Gb)
+		max_speed = 16;
+	else if (phba->lmt & LMT_10Gb)
+		max_speed = 10;
+	else if (phba->lmt & LMT_8Gb)
+		max_speed = 8;
+	else if (phba->lmt & LMT_4Gb)
+		max_speed = 4;
+	else if (phba->lmt & LMT_2Gb)
+		max_speed = 2;
+	else
+		max_speed = 1;
+
+	vp = &phba->vpd;
+
+	switch (dev_id) {
+	case PCI_DEVICE_ID_FIREFLY:
+		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SUPERFLY:
+		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
+			m = (typeof(m)){"LP7000", "PCI",
+					"Fibre Channel Adapter"};
+		else
+			m = (typeof(m)){"LP7000E", "PCI",
+					"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_DRAGONFLY:
+		m = (typeof(m)){"LP8000", "PCI",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_CENTAUR:
+		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
+			m = (typeof(m)){"LP9002", "PCI",
+					"Fibre Channel Adapter"};
+		else
+			m = (typeof(m)){"LP9000", "PCI",
+					"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_RFLY:
+		m = (typeof(m)){"LP952", "PCI",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PEGASUS:
+		m = (typeof(m)){"LP9802", "PCI-X",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_THOR:
+		m = (typeof(m)){"LP10000", "PCI-X",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_VIPER:
+		m = (typeof(m)){"LPX1000",  "PCI-X",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PFLY:
+		m = (typeof(m)){"LP982", "PCI-X",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_TFLY:
+		m = (typeof(m)){"LP1050", "PCI-X",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS:
+		m = (typeof(m)){"LP11000", "PCI-X2",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS_SCSP:
+		m = (typeof(m)){"LP11000-SP", "PCI-X2",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HELIOS_DCSP:
+		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE:
+		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE_SCSP:
+		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_NEPTUNE_DCSP:
+		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_BMID:
+		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_BSMB:
+		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR:
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR_SCSP:
+		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZEPHYR_DCSP:
+		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
+		GE = 1;
+		break;
+	case PCI_DEVICE_ID_ZMID:
+		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_ZSMB:
+		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP101:
+		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP10000S:
+		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LP11000S:
+		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LPE11000S:
+		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT:
+		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_MID:
+		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_SMB:
+		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_DCSP:
+		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_SCSP:
+		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_SAT_S:
+		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_HORNET:
+		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
+		GE = 1;
+		break;
+	case PCI_DEVICE_ID_PROTEUS_VF:
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_PF:
+		m = (typeof(m)){"LPev12000", "PCIe IOV",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_PROTEUS_S:
+		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_TIGERSHARK:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_TOMCAT:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
+		break;
+	case PCI_DEVICE_ID_FALCON:
+		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
+				"EmulexSecure Fibre"};
+		break;
+	case PCI_DEVICE_ID_BALIUS:
+		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
+				"Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FC:
+	case PCI_DEVICE_ID_LANCER_FC_VF:
+		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FCOE:
+	case PCI_DEVICE_ID_LANCER_FCOE_VF:
+		oneConnect = 1;
+		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
+		break;
+	default:
+		m = (typeof(m)){"Unknown", "", ""};
+		break;
+	}
+
+	if (mdp && mdp[0] == '\0')
+		snprintf(mdp, 79, "%s", m.name);
+	/*
+	 * oneConnect HBAs require special processing; they are all initiators
+	 * and we put the port number on the end.
+	 */
+	if (descp && descp[0] == '\0') {
+		if (oneConnect)
+			snprintf(descp, 255,
+				"Emulex OneConnect %s, %s Initiator, Port %s",
+				m.name, m.function,
+				phba->Port);
+		else
+			snprintf(descp, 255,
+				"Emulex %s %d%s %s %s",
+				m.name, max_speed, (GE) ? "GE" : "Gb",
+				m.bus, m.function);
+	}
+}
+
+/**
+ * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to an IOCB ring.
+ * @cnt: the number of IOCBs to be posted to the IOCB ring.
+ *
+ * This routine posts a given number of IOCBs with the associated DMA buffer
+ * descriptors specified by the cnt argument to the given IOCB ring.
+ *
+ * Return codes
+ *   The number of IOCBs NOT able to be posted to the IOCB ring.
+ **/
+int
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+{
+	IOCB_t *icmd;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_dmabuf *mp1, *mp2;
+
+	cnt += pring->missbufcnt;
+
+	/* While there are buffers to post */
+	while (cnt > 0) {
+		/* Allocate buffer for  command iocb */
+		iocb = lpfc_sli_get_iocbq(phba);
+		if (iocb == NULL) {
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+		icmd = &iocb->iocb;
+
+		/* 2 buffers can be posted per command */
+		/* Allocate buffer to post */
+		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+		if (mp1)
+		    mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
+		if (!mp1 || !mp1->virt) {
+			kfree(mp1);
+			lpfc_sli_release_iocbq(phba, iocb);
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+
+		INIT_LIST_HEAD(&mp1->list);
+		/* Allocate buffer to post */
+		if (cnt > 1) {
+			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+			if (mp2)
+				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+							    &mp2->phys);
+			if (!mp2 || !mp2->virt) {
+				kfree(mp2);
+				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+				kfree(mp1);
+				lpfc_sli_release_iocbq(phba, iocb);
+				pring->missbufcnt = cnt;
+				return cnt;
+			}
+
+			INIT_LIST_HEAD(&mp2->list);
+		} else {
+			mp2 = NULL;
+		}
+
+		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
+		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
+		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
+		icmd->ulpBdeCount = 1;
+		cnt--;
+		if (mp2) {
+			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
+			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
+			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
+			cnt--;
+			icmd->ulpBdeCount = 2;
+		}
+
+		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+		icmd->ulpLe = 1;
+
+		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+		    IOCB_ERROR) {
+			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
+			kfree(mp1);
+			cnt++;
+			if (mp2) {
+				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
+				kfree(mp2);
+				cnt++;
+			}
+			lpfc_sli_release_iocbq(phba, iocb);
+			pring->missbufcnt = cnt;
+			return cnt;
+		}
+		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
+		if (mp2)
+			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
+	}
+	pring->missbufcnt = 0;
+	return 0;
+}
+
+/**
+ * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine posts initial receive IOCB buffers to the ELS ring. The
+ * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
+ * set to 64 IOCBs.
+ *
+ * Return codes
+ *   0 - success (currently always success)
+ **/
+static int
+lpfc_post_rcv_buf(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	/* Ring 0, ELS / CT buffers */
+	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+	/* Ring 2 - FCP no buffers needed */
+
+	return 0;
+}
+
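+/* S(N,V) rotates the 32-bit value V left by N bits (rotl, as used by SHA-1) */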
+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
+
+/**
+ * lpfc_sha_init - Set up initial array of hash table entries
+ * @HashResultPointer: pointer to an array as hash table.
+ *
+ * This routine sets up the initial values for the array of hash table entries
+ * for the LC HBAs.
+ **/
+static void
+lpfc_sha_init(uint32_t * HashResultPointer)
+{
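+	/* Standard SHA-1 initial hash values (H0..H4) */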
+	HashResultPointer[0] = 0x67452301;
+	HashResultPointer[1] = 0xEFCDAB89;
+	HashResultPointer[2] = 0x98BADCFE;
+	HashResultPointer[3] = 0x10325476;
+	HashResultPointer[4] = 0xC3D2E1F0;
+}
+
+/**
+ * lpfc_sha_iterate - Iterate initial hash table with the working hash table
+ * @HashResultPointer: pointer to an initial/result hash table.
+ * @HashWorkingPointer: pointer to a working hash table.
+ *
+ * This routine iterates an initial hash table pointed to by @HashResultPointer
+ * with the values from the working hash table pointed to by
+ * @HashWorkingPointer. The results are put back into the initial hash table
+ * and returned through @HashResultPointer as the result hash table.
+ **/
+static void
+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
+{
+	int t;
+	uint32_t TEMP;
+	uint32_t A, B, C, D, E;
+	t = 16;
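+	/* Expand the 16 input words into the 80-word SHA-1 message schedule */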
+	do {
+		HashWorkingPointer[t] =
+		    S(1,
+		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
+								     8] ^
+		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
+	} while (++t <= 79);
+	t = 0;
+	A = HashResultPointer[0];
+	B = HashResultPointer[1];
+	C = HashResultPointer[2];
+	D = HashResultPointer[3];
+	E = HashResultPointer[4];
+
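+	/* 80 rounds of the SHA-1 compression function using the standard
+	 * round constants.
+	 */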
+	do {
+		if (t < 20) {
+			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
+		} else if (t < 40) {
+			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
+		} else if (t < 60) {
+			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
+		} else {
+			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
+		}
+		TEMP += S(5, A) + E + HashWorkingPointer[t];
+		E = D;
+		D = C;
+		C = S(30, B);
+		B = A;
+		A = TEMP;
+	} while (++t <= 79);
+
+	HashResultPointer[0] += A;
+	HashResultPointer[1] += B;
+	HashResultPointer[2] += C;
+	HashResultPointer[3] += D;
+	HashResultPointer[4] += E;
+
+}
+
+/**
+ * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
+ * @RandomChallenge: pointer to the entry of host challenge random number array.
+ * @HashWorking: pointer to the entry of the working hash array.
+ *
+ * This routine calculates the working hash array entry referred to by
+ * @HashWorking from the challenge random number associated with the host,
+ * referred to by @RandomChallenge. The result is put into the entry of the
+ * working hash array and returned by reference through @HashWorking.
+ **/
+static void
+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
+{
+	*HashWorking = (*RandomChallenge ^ *HashWorking);
+}
+
+/**
+ * lpfc_hba_init - Perform special handling for LC HBA initialization
+ * @phba: pointer to lpfc hba data structure.
+ * @hbainit: pointer to an array of unsigned 32-bit integers.
+ *
+ * This routine performs the special handling for LC HBA initialization.
+ **/
+void
+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
+{
+	int t;
+	uint32_t *HashWorking;
+	uint32_t *pwwnn = (uint32_t *) phba->wwnn;
+
+	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
+	if (!HashWorking)
+		return;
+
+	HashWorking[0] = HashWorking[78] = *pwwnn++;
+	HashWorking[1] = HashWorking[79] = *pwwnn;
+
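+	/* XOR the 7-word host random challenge into the start of the working
+	 * array; words 0 and 1 already hold the WWNN.
+	 */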
+	for (t = 0; t < 7; t++)
+		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
+
+	lpfc_sha_init(hbainit);
+	lpfc_sha_iterate(hbainit, HashWorking);
+	kfree(HashWorking);
+}
+
+/**
+ * lpfc_cleanup - Performs vport cleanups before deleting a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs the necessary cleanups before deleting the @vport.
+ * It invokes the discovery state machine to perform necessary state
+ * transitions and to release the ndlps associated with the @vport. Note,
+ * the physical port is treated as @vport 0.
+ **/
+void
+lpfc_cleanup(struct lpfc_vport *vport)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp, *next_ndlp;
+	int i = 0;
+
+	if (phba->link_state > LPFC_LINK_DOWN)
+		lpfc_port_link_failure(vport);
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp)) {
+			ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+			if (!ndlp)
+				continue;
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+			/* Trigger the release of the ndlp memory */
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+		spin_lock_irq(&phba->ndlp_lock);
+		if (NLP_CHK_FREE_REQ(ndlp)) {
+			/* The ndlp should not be in memory free mode already */
+			spin_unlock_irq(&phba->ndlp_lock);
+			continue;
+		} else
+			/* Indicate request for freeing ndlp memory */
+			NLP_SET_FREE_REQ(ndlp);
+		spin_unlock_irq(&phba->ndlp_lock);
+
+		if (vport->port_type != LPFC_PHYSICAL_PORT &&
+		    ndlp->nlp_DID == Fabric_DID) {
+			/* Just free up ndlp with Fabric_DID for vports */
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
+		/* take care of nodes in unused state before the state
+		 * machine takes action.
+		 */
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+			lpfc_nlp_put(ndlp);
+			continue;
+		}
+
+		if (ndlp->nlp_type & NLP_FABRIC)
+			lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					     NLP_EVT_DEVICE_RM);
+	}
+
+	/* At this point, ALL ndlp's should be gone
+	 * because of the previous NLP_EVT_DEVICE_RM.
+	 * Lets wait for this to happen, if needed.
+	 */
+	while (!list_empty(&vport->fc_nodes)) {
+		if (i++ > 3000) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				"0233 Nodelist not empty\n");
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						&vport->fc_nodes, nlp_listp) {
+				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
+						LOG_NODE,
+						"0282 did:x%x ndlp:x%p "
+						"usgmap:x%x refcnt:%d\n",
+						ndlp->nlp_DID, (void *)ndlp,
+						ndlp->nlp_usg_map,
+						atomic_read(
+							&ndlp->kref.refcount));
+			}
+			break;
+		}
+
+		/* Wait for any activity on ndlps to settle */
+		msleep(10);
+	}
+	lpfc_cleanup_vports_rrqs(vport, NULL);
+}
+
+/**
+ * lpfc_stop_vport_timers - Stop all the timers associated with a vport
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine stops all the timers associated with a @vport. This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
+{
+	del_timer_sync(&vport->els_tmofunc);
+	del_timer_sync(&vport->fc_fdmitmo);
+	del_timer_sync(&vport->delayed_disc_tmo);
+	lpfc_can_disctmo(vport);
+	return;
+}
+
+/**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
+ * caller of this routine should already hold the hbalock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	/* Clear pending FCF rediscovery wait flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
+	/* Now, try to stop the timer */
+	del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
+ * checks whether the FCF rediscovery wait timer is pending with the hbalock
+ * held before proceeding with disabling the timer and clearing the
+ * wait timer pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		/* FCF rediscovery timer already fired or stopped */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	/* Clear failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops all the timers associated with a HBA. This function is
+ * invoked before either putting a HBA offline or unloading the driver.
+ **/
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
+{
+	lpfc_stop_vport_timers(phba->pport);
+	del_timer_sync(&phba->sli.mbox_tmo);
+	del_timer_sync(&phba->fabric_block_timer);
+	del_timer_sync(&phba->eratt_poll);
+	del_timer_sync(&phba->hb_tmofunc);
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		del_timer_sync(&phba->rrq_tmr);
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	}
+	phba->hb_outstanding = 0;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		/* Stop any LightPulse device specific driver timers */
+		del_timer_sync(&phba->fcp_poll_timer);
+		break;
+	case LPFC_PCI_DEV_OC:
+		/* Stop any OneConnect device specific driver timers */
+		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0297 Invalid device group (x%x)\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as blocked. Once the HBA's
+ * management interface is marked as blocked, all user space access to the
+ * HBA, whether from the sysfs interface or the libdfc interface, will be
+ * blocked. The HBA is set to block the management interface when the
+ * driver prepares the HBA interface for online or offline.
+ **/
+static void
+lpfc_block_mgmt_io(struct lpfc_hba * phba)
+{
+	unsigned long iflag;
+	uint8_t actcmd = MBX_HEARTBEAT;
+	unsigned long timeout;
+
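+	/* Default to the generic mailbox timeout; refined below if a mailbox
+	 * command is currently active.
+	 */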
+	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+	if (phba->sli.mbox_active) {
+		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+		/* Determine how long we might wait for the active mailbox
+		 * command to be gracefully completed by firmware.
+		 */
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+				phba->sli.mbox_active) * 1000) + jiffies;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Wait for the outstanding mailbox command to complete */
+	while (phba->sli.mbox_active) {
+		/* Check active mailbox complete status every 2ms */
+		msleep(2);
+		if (time_after(jiffies, timeout)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2813 Mgmt IO is Blocked %x "
+				"- mbox cmd %x still active\n",
+				phba->sli.sli_flag, actcmd);
+			break;
+		}
+	}
+}
+
+/**
+ * lpfc_sli4_node_prep - Assign RPIs for active nodes.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Allocate RPIs for all active remote nodes. This is needed whenever
+ * an SLI4 adapter is reset and the driver is not unloading. Its purpose
+ * is to fix up the temporary RPI assignments.
+ **/
+void
+lpfc_sli4_node_prep(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist  *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->load_flag & FC_UNLOADING)
+				continue;
+
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						 &vports[i]->fc_nodes,
+						 nlp_listp) {
+				if (NLP_CHK_NODE_ACT(ndlp))
+					ndlp->nlp_rpi =
+						lpfc_sli4_alloc_rpi(phba);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_online - Initialize and bring a HBA online
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the HBA and brings a HBA online. During this
+ * process, the management interface is blocked to prevent user space access
+ * to the HBA interfering with the driver initialization.
+ *
+ * Return codes
+ *   0 - successful
+ *   1 - failed
+ **/
+int
+lpfc_online(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_vport **vports;
+	int i;
+
+	if (!phba)
+		return 0;
+	vport = phba->pport;
+
+	if (!(vport->fc_flag & FC_OFFLINE_MODE))
+		return 0;
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"0458 Bring Adapter online\n");
+
+	lpfc_block_mgmt_io(phba);
+
+	if (!lpfc_sli_queue_setup(phba)) {
+		lpfc_unblock_mgmt_io(phba);
+		return 1;
+	}
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
+	} else {
+		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
+			lpfc_unblock_mgmt_io(phba);
+			return 1;
+		}
+	}
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			struct Scsi_Host *shost;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
+			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	lpfc_unblock_mgmt_io(phba);
+	return 0;
+}
+
+/**
+ * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as not blocked. Once the
+ * HBA's management interface is marked as not blocked, all user space
+ * access to the HBA, whether from the sysfs interface or the libdfc
+ * interface, will be allowed. The HBA is set to block the management
+ * interface when the driver prepares the HBA interface for online or offline
+ * and is then set to unblock the management interface afterwards.
+ **/
+void
+lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
+{
+	unsigned long iflag;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_offline_prep - Prepare a HBA to be brought offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to prepare a HBA to be brought offline. It performs
+ * unregistration of the login for all nodes on all vports and flushes the
+ * mailbox queue to make it ready to be brought offline.
+ **/
+void
+lpfc_offline_prep(struct lpfc_hba * phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_nodelist  *ndlp, *next_ndlp;
+	struct lpfc_vport **vports;
+	struct Scsi_Host *shost;
+	int i;
+
+	if (vport->fc_flag & FC_OFFLINE_MODE)
+		return;
+
+	lpfc_block_mgmt_io(phba);
+
+	lpfc_linkdown(phba);
+
+	/* Issue an unreg_login to all nodes on all vports */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL) {
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			if (vports[i]->load_flag & FC_UNLOADING)
+				continue;
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+			spin_unlock_irq(shost->host_lock);
+
+			shost = lpfc_shost_from_vport(vports[i]);
+			list_for_each_entry_safe(ndlp, next_ndlp,
+						 &vports[i]->fc_nodes,
+						 nlp_listp) {
+				if (!NLP_CHK_NODE_ACT(ndlp))
+					continue;
+				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+					continue;
+				if (ndlp->nlp_type & NLP_FABRIC) {
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RECOVERY);
+					lpfc_disc_state_machine(vports[i], ndlp,
+						NULL, NLP_EVT_DEVICE_RM);
+				}
+				spin_lock_irq(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+				spin_unlock_irq(shost->host_lock);
+				/*
+				 * Whenever an SLI4 port goes offline, free the
+				 * RPI. Get a new RPI when the adapter port
+				 * comes back online.
+				 */
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+				lpfc_unreg_rpi(vports[i], ndlp);
+			}
+		}
+	}
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	lpfc_sli_mbox_sys_shutdown(phba);
+}
+
+/**
+ * lpfc_offline - Bring a HBA offline
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine actually brings a HBA offline. It stops all the timers
+ * associated with the HBA, brings down the SLI layer, and eventually
+ * marks the HBA as in offline state for the upper layer protocol.
+ **/
+void
+lpfc_offline(struct lpfc_hba *phba)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_vport **vports;
+	int i;
+
+	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+		return;
+
+	/* stop port and all timers associated with this hba */
+	lpfc_stop_port(phba);
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_stop_vport_timers(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+			"0460 Bring Adapter offline\n");
+	/* Bring down the SLI Layer and cleanup.  The HBA is offline
+	   now.  */
+	lpfc_sli_hba_down(phba);
+	spin_lock_irq(&phba->hbalock);
+	phba->work_ha = 0;
+	spin_unlock_irq(&phba->hbalock);
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			spin_lock_irq(shost->host_lock);
+			vports[i]->work_port_events = 0;
+			vports[i]->fc_flag |= FC_OFFLINE_MODE;
+			spin_unlock_irq(shost->host_lock);
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the SCSI buffers in the system and updates the
+ * physical XRIs assigned to each SCSI buffer, because these may change after a
+ * firmware reset.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *sb, *sb_next;
+
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->scsi_buf_list_lock);
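+	/* Remap each buffer's logical XRI to its new physical XRI and mark
+	 * the XRI as in use in the driver's XRI bitmask and counters.
+	 */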
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+		sb->cur_iocbq.sli4_xritag =
+			phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+		set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		phba->sli4_hba.xri_count++;
+	}
+	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
+/**
+ * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the SCSI buffers and IOCBs from the driver
+ * lists back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ *
+ * Return codes
+ *   0 - successful (for now, it always returns 0)
+ **/
+static int
+lpfc_scsi_free(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *sb, *sb_next;
+	struct lpfc_iocbq *io, *io_next;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Release all the lpfc_scsi_bufs maintained by this host. */
+	spin_lock(&phba->scsi_buf_list_lock);
+	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+		list_del(&sb->list);
+		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+			      sb->dma_handle);
+		kfree(sb);
+		phba->total_scsi_bufs--;
+	}
+	spin_unlock(&phba->scsi_buf_list_lock);
+
+	/* Release all the lpfc_iocbq entries maintained by this host. */
+	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
+		list_del(&io->list);
+		kfree(io);
+		phba->total_iocbq_bufs--;
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
+/**
+ * lpfc_create_port - Create an FC port
+ * @phba: pointer to lpfc hba data structure.
+ * @instance: a unique integer ID to this FC port.
+ * @dev: pointer to the device data structure.
+ *
+ * This routine creates a FC port for the upper layer protocol. The FC port
+ * can be created on top of either a physical port or a virtual port provided
+ * by the HBA. This routine also allocates a SCSI host data structure (shost)
+ * and associates it with the FC port before adding the shost to the SCSI
+ * layer.
+ *
+ * Return codes
+ *   @vport - pointer to the virtual N_Port data structure.
+ *   NULL - port create failed.
+ **/
+struct lpfc_vport *
+lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost;
+	int error = 0;
+
+	if (dev != &phba->pcidev->dev)
+		shost = scsi_host_alloc(&lpfc_vport_template,
+					sizeof(struct lpfc_vport));
+	else
+		shost = scsi_host_alloc(&lpfc_template,
+					sizeof(struct lpfc_vport));
+	if (!shost)
+		goto out;
+
+	vport = (struct lpfc_vport *) shost->hostdata;
+	vport->phba = phba;
+	vport->load_flag |= FC_LOADING;
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	vport->fc_rscn_flush = 0;
+
+	lpfc_get_vport_cfgparam(vport);
+	shost->unique_id = instance;
+	shost->max_id = LPFC_MAX_TARGET;
+	shost->max_lun = vport->cfg_max_luns;
+	shost->this_id = -1;
+	shost->max_cmd_len = 16;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		shost->dma_boundary =
+			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
+		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+	}
+
+	/*
+	 * Set initial can_queue value since 0 is no longer supported and
+	 * scsi_add_host will fail. This will be adjusted later based on the
+	 * max xri value determined in hba setup.
+	 */
+	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (dev != &phba->pcidev->dev) {
+		shost->transportt = lpfc_vport_transport_template;
+		vport->port_type = LPFC_NPIV_PORT;
+	} else {
+		shost->transportt = lpfc_transport_template;
+		vport->port_type = LPFC_PHYSICAL_PORT;
+	}
+
+	/* Initialize all internally managed lists. */
+	INIT_LIST_HEAD(&vport->fc_nodes);
+	INIT_LIST_HEAD(&vport->rcv_buffer_list);
+	spin_lock_init(&vport->work_port_lock);
+
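+	/* Set up the per-vport timers: discovery, FDMI, ELS and delayed
+	 * discovery timeouts.
+	 */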
+	init_timer(&vport->fc_disctmo);
+	vport->fc_disctmo.function = lpfc_disc_timeout;
+	vport->fc_disctmo.data = (unsigned long)vport;
+
+	init_timer(&vport->fc_fdmitmo);
+	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
+	vport->fc_fdmitmo.data = (unsigned long)vport;
+
+	init_timer(&vport->els_tmofunc);
+	vport->els_tmofunc.function = lpfc_els_timeout;
+	vport->els_tmofunc.data = (unsigned long)vport;
+
+	init_timer(&vport->delayed_disc_tmo);
+	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+	vport->delayed_disc_tmo.data = (unsigned long)vport;
+
+	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
+	if (error)
+		goto out_put_shost;
+
+	spin_lock_irq(&phba->hbalock);
+	list_add_tail(&vport->listentry, &phba->port_list);
+	spin_unlock_irq(&phba->hbalock);
+	return vport;
+
+out_put_shost:
+	scsi_host_put(shost);
+out:
+	return NULL;
+}
+
+/**
+ * destroy_port -  destroy an FC port
+ * @vport: pointer to an lpfc virtual N_Port data structure.
+ *
+ * This routine destroys a FC port from the upper layer protocol. All the
+ * resources associated with the port are released.
+ **/
+void
+destroy_port(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	lpfc_debugfs_terminate(vport);
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_cleanup(vport);
+	return;
+}
+
+/**
+ * lpfc_get_instance - Get a unique integer ID
+ *
+ * This routine allocates a unique integer ID from lpfc_hba_index pool. It
+ * uses the kernel idr facility to perform the task.
+ *
+ * Return codes:
+ *   instance - a unique integer ID allocated as the new instance.
+ *   -1 - lpfc get instance failed.
+ **/
+int
+lpfc_get_instance(void)
+{
+	int instance = 0;
+
+	/* Assign an unused number */
+	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+		return -1;
+	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
+		return -1;
+	return instance;
+}
+
+/**
+ * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
+ * @shost: pointer to SCSI host data structure.
+ * @time: elapsed time of the scan in jiffies.
+ *
+ * This routine is called by the SCSI layer with a SCSI host to determine
+ * whether the scan host is finished.
+ *
+ * Note: there is no scan_start function as adapter initialization will have
+ * asynchronously kicked off the link initialization.
+ *
+ * Return codes
+ *   0 - SCSI host scan is not over yet.
+ *   1 - SCSI host scan is over.
+ **/
+int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int stat = 0;
+
+	spin_lock_irq(shost->host_lock);
+
+	if (vport->load_flag & FC_UNLOADING) {
+		stat = 1;
+		goto finished;
+	}
+	if (time >= 30 * HZ) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0461 Scanning longer than 30 "
+				"seconds.  Continuing initialization\n");
+		stat = 1;
+		goto finished;
+	}
+	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0465 Link down longer than 15 "
+				"seconds.  Continuing initialization\n");
+		stat = 1;
+		goto finished;
+	}
+
+	if (vport->port_state != LPFC_VPORT_READY)
+		goto finished;
+	if (vport->num_disc_nodes || vport->fc_prli_sent)
+		goto finished;
+	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+		goto finished;
+	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
+		goto finished;
+
+	stat = 1;
+
+finished:
+	spin_unlock_irq(shost->host_lock);
+	return stat;
+}
+
+/**
+ * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
+ * @shost: pointer to SCSI host data structure.
+ *
+ * This routine initializes the given SCSI host's attributes on an FC port. The
+ * SCSI host can be either on top of a physical port or a virtual port.
+ **/
+void lpfc_host_attrib_init(struct Scsi_Host *shost)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	/*
+	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
+	 */
+
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+	fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+	memset(fc_host_supported_fc4s(shost), 0,
+	       sizeof(fc_host_supported_fc4s(shost)));
+	fc_host_supported_fc4s(shost)[2] = 1;
+	fc_host_supported_fc4s(shost)[7] = 1;
+
+	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+				 sizeof fc_host_symbolic_name(shost));
+
+	fc_host_supported_speeds(shost) = 0;
+	if (phba->lmt & LMT_16Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
+	if (phba->lmt & LMT_10Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
+	if (phba->lmt & LMT_8Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
+	if (phba->lmt & LMT_4Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
+	if (phba->lmt & LMT_2Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
+	if (phba->lmt & LMT_1Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
+
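+	/* Maximum frame size comes from the receive data field size in the
+	 * port's common service parameters.
+	 */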
+	fc_host_maxframe_size(shost) =
+		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
+
+	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
+
+	/* This value is also unchanging */
+	memset(fc_host_active_fc4s(shost), 0,
+	       sizeof(fc_host_active_fc4s(shost)));
+	fc_host_active_fc4s(shost)[2] = 1;
+	fc_host_active_fc4s(shost)[7] = 1;
+
+	fc_host_max_npiv_vports(shost) = phba->max_vpi;
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag &= ~FC_LOADING;
+	spin_unlock_irq(shost->host_lock);
+}
+
+/**
+ * lpfc_stop_port_s3 - Stop SLI3 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI3 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
+{
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	/* Clear all pending interrupts */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+
+	/* Reset some HBA SLI setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port, it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+	/* Reset some HBA SLI4 setup states */
+	lpfc_stop_hba_timers(phba);
+	phba->pport->work_port_events = 0;
+	phba->sli4_hba.intr_enable = 0;
+}
+
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
+ * the API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+	phba->lpfc_stop_port(phba);
+}
+
+/**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+	unsigned long fcf_redisc_wait_tmo =
+		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+	/* Start fcf rediscovery wait period timer */
+	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+	spin_lock_irq(&phba->hbalock);
+	/* Allow action to new fcf asynchronous event */
+	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+	/* Mark the FCF rediscovery pending state */
+	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: holds the lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when the wait for FCF table rediscovery has timed
+ * out. If new FCF record(s) have been discovered during the wait period, a
+ * new FCF event shall be added to the FCoE async event list, and the worker
+ * thread shall then be woken up for processing from the worker thread
+ * context.
+ **/
+void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+	/* Don't send FCF rediscovery event if timer cancelled */
+	spin_lock_irq(&phba->hbalock);
+	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+	/* Clear FCF rediscovery timer pending flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* FCF rediscovery event to worker thread */
+	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover quiescent timer expired\n");
+	/* wake up worker thread */
+	lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
+ **/
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+			   struct lpfc_acqe_link *acqe_link)
+{
+	uint16_t latt_fault;
+
+	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+	case LPFC_ASYNC_LINK_FAULT_NONE:
+	case LPFC_ASYNC_LINK_FAULT_LOCAL:
+	case LPFC_ASYNC_LINK_FAULT_REMOTE:
+		latt_fault = 0;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0398 Invalid link fault code: x%x\n",
+				bf_get(lpfc_acqe_link_fault, acqe_link));
+		latt_fault = MBXERR_ERROR;
+		break;
+	}
+	return latt_fault;
+}
+
+/**
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
+ *
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+			  struct lpfc_acqe_link *acqe_link)
+{
+	uint8_t att_type;
+
+	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+	case LPFC_ASYNC_LINK_STATUS_DOWN:
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+		att_type = LPFC_ATT_LINK_DOWN;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_UP:
+		/* Ignore physical link up events - wait for logical link up */
+		att_type = LPFC_ATT_RESERVED;
+		break;
+	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+		att_type = LPFC_ATT_LINK_UP;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0399 Invalid link attention type: x%x\n",
+				bf_get(lpfc_acqe_link_status, acqe_link));
+		att_type = LPFC_ATT_RESERVED;
+		break;
+	}
+	return att_type;
+}
+
+/**
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+				struct lpfc_acqe_link *acqe_link)
+{
+	uint8_t link_speed;
+
+	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+	case LPFC_ASYNC_LINK_SPEED_ZERO:
+	case LPFC_ASYNC_LINK_SPEED_10MBPS:
+	case LPFC_ASYNC_LINK_SPEED_100MBPS:
+		link_speed = LPFC_LINK_SPEED_UNKNOWN;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_1GBPS:
+		link_speed = LPFC_LINK_SPEED_1GHZ;
+		break;
+	case LPFC_ASYNC_LINK_SPEED_10GBPS:
+		link_speed = LPFC_LINK_SPEED_10GHZ;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0483 Invalid link-attention link speed: x%x\n",
+				bf_get(lpfc_acqe_link_speed, acqe_link));
+		link_speed = LPFC_LINK_SPEED_UNKNOWN;
+		break;
+	}
+	return link_speed;
+}
+
+/**
+ * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FCoE link event.
+ **/
+static void
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_link *acqe_link)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_mbx_read_top *la;
+	uint8_t att_type;
+	int rc;
+
+	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
+		return;
+	phba->fcoe_eventtag = acqe_link->event_tag;
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0395 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0396 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0397 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we have done process link event */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = phba->pport;
+
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+				bf_get(lpfc_acqe_link_speed, acqe_link);
+	phba->sli4_hba.link_state.duplex =
+				bf_get(lpfc_acqe_link_duplex, acqe_link);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_link_status, acqe_link);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_link_type, acqe_link);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_link_number, acqe_link);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_link);
+	phba->sli4_hba.link_state.logical_speed =
+			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2900 Async FC/FCoE Link event - Speed:%dGBit "
+			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
+			"Logical speed:%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.topology,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.fault);
+	/*
+	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
+	 * topology info. Note: Optional for non FC-AL ports.
+	 */
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			goto out_free_dmabuf;
+		return;
+	}
+	/*
+	 * For FCoE Mode: fill in all the topology information we need and call
+	 * the READ_TOPOLOGY completion routine to continue without actually
+	 * sending the READ_TOPOLOGY mailbox command to the port.
+	 */
+	/* Parse and translate status field */
+	mb = &pmb->u.mb;
+	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+	/* Parse and translate link attention fields */
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+	la->eventTag = acqe_link->event_tag;
+	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
+	bf_set(lpfc_mbx_read_top_link_spd, la,
+	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
+
+	/* Fake the following irrelevant fields */
+	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
+	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
+	bf_set(lpfc_mbx_read_top_il, la, 0);
+	bf_set(lpfc_mbx_read_top_pb, la, 0);
+	bf_set(lpfc_mbx_read_top_fa, la, 0);
+	bf_set(lpfc_mbx_read_top_mm, la, 0);
+
+	/* Invoke the lpfc_handle_latt mailbox command callback function */
+	lpfc_mbx_cmpl_read_topology(phba, pmb);
+
+	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fc: pointer to the async fc completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FC event. It will simply log
+ * that the event was received and then issue a read_topology mailbox command so
+ * that the rest of the driver will treat it the same as SLI3.
+ **/
+static void
+lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	if (bf_get(lpfc_trailer_type, acqe_fc) !=
+	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2895 Non FC link Event detected.(%d)\n",
+				bf_get(lpfc_trailer_type, acqe_fc));
+		return;
+	}
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
+	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
+	phba->sli4_hba.link_state.topology =
+				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_fc);
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
+			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
+			"%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.topology,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.fault);
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2897 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2898 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2899 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we have done process link event */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = phba->pport;
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		goto out_free_dmabuf;
+	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_sli: pointer to the async SLI completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous SLI events.
+ **/
+static void
+lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
+			"x%08x SLI Event Type:%d",
+			acqe_sli->event_data1, acqe_sli->event_data2,
+			bf_get(lpfc_trailer_type, acqe_sli));
+	return;
+}
+
+/**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp associated with the vport if successful,
+ * otherwise return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+
+	if (!vport)
+		return NULL;
+	phba = vport->phba;
+	if (!phba)
+		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return NULL;
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+		/* Set the node type */
+		ndlp->nlp_type |= NLP_FABRIC;
+		/* Put ndlp onto node list */
+		lpfc_enqueue_node(vport, ndlp);
+	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+		/* re-setup ndlp without removing from node list */
+		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+		if (!ndlp)
+			return NULL;
+	}
+	if ((phba->pport->port_state < LPFC_FLOGI) &&
+		(phba->pport->port_state != LPFC_VPORT_FAILED))
+		return NULL;
+	/* If virtual link is not yet instantiated ignore CVL */
+	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
+		&& (vport->port_state != LPFC_VPORT_FAILED))
+		return NULL;
+	shost = lpfc_shost_from_vport(vport);
+	if (!shost)
+		return NULL;
+	lpfc_linkdown_port(vport);
+	lpfc_cleanup_pending_mbox(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_sli4_perform_vport_cvl(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FCoE FIP event.
+ **/
+static void
+lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
+			struct lpfc_acqe_fip *acqe_fip)
+{
+	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
+	int rc;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+	int active_vlink_present;
+	struct lpfc_vport **vports;
+	int i;
+
+	phba->fc_eventTag = acqe_fip->event_tag;
+	phba->fcoe_eventtag = acqe_fip->event_tag;
+	switch (event_type) {
+	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
+	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
+		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2546 New FCF event, evt_tag:x%x, "
+					"index:x%x\n",
+					acqe_fip->event_tag,
+					acqe_fip->index);
+		else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
+					LOG_DISCOVERY,
+					"2788 FCF param modified event, "
+					"evt_tag:x%x, index:x%x\n",
+					acqe_fip->event_tag,
+					acqe_fip->index);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF roundrobin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read FCF (x%x) for updating "
+					"roundrobin FCF failover bmask\n",
+					acqe_fip->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
+		}
+
+		/* If the FCF discovery is in progress, do nothing. */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		/* If fast FCF failover rescan event is pending, do nothing */
+		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+
+		/* If the FCF has been in discovered state, do nothing. */
+		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+			spin_unlock_irq(&phba->hbalock);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan per async FCF "
+				"event, evt_tag:x%x, index:x%x\n",
+				acqe_fip->event_tag, acqe_fip->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
+		if (rc)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+					"2547 Issue FCF scan read FCF mailbox "
+					"command failed (x%x)\n", rc);
+		break;
+
+	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2548 FCF Table full count 0x%x tag 0x%x\n",
+			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+			acqe_fip->event_tag);
+		break;
+
+	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2549 FCF (x%x) disconnected from network, "
+			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
+		/*
+		 * If we are in the middle of FCF failover process, clear
+		 * the corresponding FCF bit in the roundrobin bitmap.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
+			break;
+		}
+		spin_unlock_irq(&phba->hbalock);
+
+		/* If the event is not for currently used fcf do nothing */
+		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
+			break;
+
+		/*
+		 * Otherwise, request the port to rediscover the entire FCF
+		 * table for a fast recovery from case that the current FCF
+		 * is no longer valid as we are not in the middle of FCF
+		 * failover process already.
+		 */
+		spin_lock_irq(&phba->hbalock);
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+				"\n", acqe_fip->event_tag, acqe_fip->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mailbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else {
+			/* Reset FCF roundrobin bmask for new discovery */
+			lpfc_sli4_clear_fcf_rr_bmask(phba);
+			/*
+			 * Handling fast FCF failover to a DEAD FCF event is
+			 * considered equivalent to receiving CVL on all vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
+		}
+		break;
+	case LPFC_FIP_EVENT_TYPE_CVL:
+		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2718 Clear Virtual Link Received for VPI 0x%x"
+			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
+
+		vport = lpfc_find_vport_by_vpid(phba,
+						acqe_fip->index);
+		ndlp = lpfc_sli4_perform_vport_cvl(vport);
+		if (!ndlp)
+			break;
+		active_vlink_present = 0;
+
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_CVL_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
+
+		if (active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
+			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			shost = lpfc_shost_from_vport(vport);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			/*
+			 * Otherwise, we request port to rediscover
+			 * the entire FCF table for a fast recovery
+			 * from possible case that the current FCF
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
+			 */
+			spin_lock_irq(&phba->hbalock);
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				spin_unlock_irq(&phba->hbalock);
+				break;
+			}
+			/* Mark the fast failover process in progress */
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF failover per CVL, "
+					"evt_tag:x%x\n", acqe_fip->event_tag);
+			rc = lpfc_sli4_redisc_fcf_table(phba);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mailbox command failed, "
+						"through to CVL event\n");
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+				spin_unlock_irq(&phba->hbalock);
+				/*
+				 * Last resort will be to retry on the
+				 * currently registered FCF entry.
+				 */
+				lpfc_retry_pport_discovery(phba);
+			} else
+				/*
+				 * Reset FCF roundrobin bmask for new
+				 * discovery.
+				 */
+				lpfc_sli4_clear_fcf_rr_bmask(phba);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0288 Unknown FCoE event type 0x%x event tag "
+			"0x%x\n", event_type, acqe_fip->event_tag);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_dcbx: pointer to the async DCBX completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
+static void
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_dcbx *acqe_dcbx)
+{
+	phba->fc_eventTag = acqe_dcbx->event_tag;
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0290 The SLI4 DCBX asynchronous event is not "
+			"handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change.  The port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+			 struct lpfc_acqe_grp5 *acqe_grp5)
+{
+	uint16_t prev_ll_spd;
+
+	phba->fc_eventTag = acqe_grp5->event_tag;
+	phba->fcoe_eventtag = acqe_grp5->event_tag;
+	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+	phba->sli4_hba.link_state.logical_speed =
+		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2789 GRP5 Async Event: Updating logical link speed "
+			"from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+			(phba->sli4_hba.link_state.logical_speed*10));
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the async event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ASYNC_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the async events */
+	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Process the asynchronous event */
+		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+		case LPFC_TRAILER_CODE_LINK:
+			lpfc_sli4_async_link_evt(phba,
+						 &cq_event->cqe.acqe_link);
+			break;
+		case LPFC_TRAILER_CODE_FCOE:
+			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
+			break;
+		case LPFC_TRAILER_CODE_DCBX:
+			lpfc_sli4_async_dcbx_evt(phba,
+						 &cq_event->cqe.acqe_dcbx);
+			break;
+		case LPFC_TRAILER_CODE_GRP5:
+			lpfc_sli4_async_grp5_evt(phba,
+						 &cq_event->cqe.acqe_grp5);
+			break;
+		case LPFC_TRAILER_CODE_FC:
+			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
+			break;
+		case LPFC_TRAILER_CODE_SLI:
+			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
+			break;
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"1804 Invalid asynchronous event code: "
+					"x%x\n", bf_get(lpfc_trailer_code,
+					&cq_event->cqe.mcqe_cmpl));
+			break;
+		}
+		/* Free the completion event processed to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Clear FCF rediscovery timeout event */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+	/* Clear driver fast failover FCF record flag */
+	phba->fcf.failover_rec.flag = 0;
+	/* Set state for FCF fast failover */
+	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Scan FCF table from the first entry to re-discover SAN */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start post-quiescent FCF table scan\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	if (rc)
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2747 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
+}
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 if success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	int rc;
+
+	/* Set up lpfc PCI-device group */
+	phba->pci_dev_grp = dev_grp;
+
+	/* The LPFC_PCI_DEV_OC uses SLI4 */
+	if (dev_grp == LPFC_PCI_DEV_OC)
+		phba->sli_rev = LPFC_SLI_REV4;
+
+	/* Set up device INIT API function jump table */
+	rc = lpfc_init_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SCSI API function jump table */
+	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up SLI API function jump table */
+	rc = lpfc_sli_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+	/* Set up MBOX API function jump table */
+	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+	if (rc)
+		return -ENODEV;
+
+	return 0;
+}
+
+/**
+ * lpfc_log_intr_mode - Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the currently used active interrupt mode
+ * to the device.
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+	switch (intr_mode) {
+	case 0:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0470 Enable INTx interrupt mode.\n");
+		break;
+	case 1:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0481 Enabled MSI interrupt mode.\n");
+		break;
+	case 2:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0480 Enabled MSI-X interrupt mode.\n");
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0482 Illegal interrupt mode.\n");
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the PCI device that is common to all
+ * PCI devices.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	int bars = 0;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		goto out_error;
+	else
+		pdev = phba->pcidev;
+	/* Select PCI BARs */
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	/* Enable PCI device */
+	if (pci_enable_device_mem(pdev))
+		goto out_error;
+	/* Request PCI resource for the device */
+	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+		goto out_disable_device;
+	/* Set up device as PCI master and save state for EEH */
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+	pci_save_state(pdev);
+
+	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+		pdev->needs_freset = 1;
+
+	return 0;
+
+out_disable_device:
+	pci_disable_device(pdev);
+out_error:
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"1401 Failed to enable pci device, bars:x%x\n", bars);
+	return -ENODEV;
+}
+
+/**
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the PCI device that is common to all
+ * PCI devices.
+ **/
+static void
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	int bars;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+	/* Select PCI BARs */
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	/* Release PCI resource and disable PCI device */
+	pci_release_selected_regions(pdev, bars);
+	pci_disable_device(pdev);
+	/* Null out PCI private reference to driver */
+	pci_set_drvdata(pdev, NULL);
+
+	return;
+}
+
+/**
+ * lpfc_reset_hba - Reset a hba
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to reset a hba device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * any outstanding mailbox commands.
+ **/
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
+{
+	/* If resets are disabled then set error state and return. */
+	if (!phba->cfg_enable_hba_reset) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return;
+	}
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+	lpfc_unblock_mgmt_io(phba);
+}
+
+/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads the PCI SR-IOV extended capability of the physical
+ * function to determine the total number of virtual functions the device
+ * supports.  If the device does not support SR-IOV, zero is returned.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t nr_virtfn;
+	int pos;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos == 0)
+		return 0;
+
+	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+	return nr_virtfn;
+}
+
+/**
+ * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ * @nr_vfn: number of virtual functions to be enabled.
+ *
+ * This function enables the PCI SR-IOV virtual functions to a physical
+ * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
+ * enable that number of virtual functions on the physical function. As
+ * not all devices support SR-IOV, the return code from the pci_enable_sriov()
+ * API call is not treated as an error condition for most devices.
+ **/
+int
+lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
+{
+	struct pci_dev *pdev = phba->pcidev;
+	uint16_t max_nr_vfn;
+	int rc;
+
+	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+	if (nr_vfn > max_nr_vfn) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3057 Requested vfs (%d) greater than "
+				"supported vfs (%d)", nr_vfn, max_nr_vfn);
+		return -EINVAL;
+	}
+
+	rc = pci_enable_sriov(pdev, nr_vfn);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2806 Failed to enable sriov on this device "
+				"with vfn number nr_vf:%d, rc:%d\n",
+				nr_vfn, rc);
+	} else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2807 Successful enable sriov on this device "
+				"with vfn number nr_vf:%d\n", nr_vfn);
+	return rc;
+}
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	int rc;
+
+	/*
+	 * Initialize timers used by driver
+	 */
+
+	/* Heartbeat timer */
+	init_timer(&phba->hb_tmofunc);
+	phba->hb_tmofunc.function = lpfc_hb_timeout;
+	phba->hb_tmofunc.data = (unsigned long)phba;
+
+	psli = &phba->sli;
+	/* MBOX heartbeat timer */
+	init_timer(&psli->mbox_tmo);
+	psli->mbox_tmo.function = lpfc_mbox_timeout;
+	psli->mbox_tmo.data = (unsigned long) phba;
+	/* FCP polling mode timer */
+	init_timer(&phba->fcp_poll_timer);
+	phba->fcp_poll_timer.function = lpfc_poll_timeout;
+	phba->fcp_poll_timer.data = (unsigned long) phba;
+	/* Fabric block timer */
+	init_timer(&phba->fabric_block_timer);
+	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+	phba->fabric_block_timer.data = (unsigned long) phba;
+	/* EA polling mode timer */
+	init_timer(&phba->eratt_poll);
+	phba->eratt_poll.function = lpfc_poll_eratt;
+	phba->eratt_poll.data = (unsigned long) phba;
+
+	/* Host attention work mask setup */
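+	/* Error, mailbox and link attention bits, plus the receive
+	 * attention bits of the ELS ring (the HA register carries
+	 * 4 bits per ring).
+	 */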
+	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+	}
+
+	/*
+	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 * 2 segments are added since the IOCB needs a command and response bde.
+	 */
+	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+		sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+	if (phba->cfg_enable_bg) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+		phba->cfg_sg_dma_buf_size +=
+			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+	}
+
+	/* Also reinitialize the host templates with new values. */
+	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+	phba->max_vpi = LPFC_MAX_VPI;
+	/* This will be set to correct value after config_port mbox */
+	phba->max_vports = 0;
+
+	/*
+	 * Initialize the SLI Layer to run with lpfc HBAs.
+	 */
+	lpfc_sli_setup(phba);
+	lpfc_sli_queue_setup(phba);
+
+	/* Allocate device driver memory */
+	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+		return -ENOMEM;
+
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2808 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-3 HBA device it attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+	/* Free device driver memory allocated */
+	lpfc_mem_free_all(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-4 HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	LPFC_MBOXQ_t *mboxq;
+	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+	struct lpfc_mqe *mqe;
+	int longs, sli_family;
+	int sges_per_segment;
+
+	/* Before proceed, wait for POST done and device ready */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (rc)
+		return -ENODEV;
+
+	/*
+	 * Initialize timers used by driver
+	 */
+
+	/* Heartbeat timer */
+	init_timer(&phba->hb_tmofunc);
+	phba->hb_tmofunc.function = lpfc_hb_timeout;
+	phba->hb_tmofunc.data = (unsigned long)phba;
+	init_timer(&phba->rrq_tmr);
+	phba->rrq_tmr.function = lpfc_rrq_timeout;
+	phba->rrq_tmr.data = (unsigned long)phba;
+
+	psli = &phba->sli;
+	/* MBOX heartbeat timer */
+	init_timer(&psli->mbox_tmo);
+	psli->mbox_tmo.function = lpfc_mbox_timeout;
+	psli->mbox_tmo.data = (unsigned long) phba;
+	/* Fabric block timer */
+	init_timer(&phba->fabric_block_timer);
+	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+	phba->fabric_block_timer.data = (unsigned long) phba;
+	/* EA polling mode timer */
+	init_timer(&phba->eratt_poll);
+	phba->eratt_poll.function = lpfc_poll_eratt;
+	phba->eratt_poll.data = (unsigned long) phba;
+	/* FCF rediscover timer */
+	init_timer(&phba->fcf.redisc_wait);
+	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+	phba->fcf.redisc_wait.data = (unsigned long)phba;
+
+	/*
+	 * Control structure for handling external multi-buffer mailbox
+	 * command pass-through.
+	 */
+	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
+		sizeof(struct lpfc_mbox_ext_buf_ctx));
+	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+
+	/*
+	 * We need to do a READ_CONFIG mailbox command here before
+	 * calling lpfc_get_cfgparam. For VFs this will report the
+	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
+	 * All of the resources allocated
+	 * for this Port are tied to these values.
+	 */
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+	phba->max_vpi = LPFC_MAX_VPI;
+	/* This will be set to correct value after the read_config mbox */
+	phba->max_vports = 0;
+
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+	/* With BlockGuard we can have multiple SGEs per Data Segment */
+	sges_per_segment = 1;
+	if (phba->cfg_enable_bg)
+		sges_per_segment = 2;
+
+	/*
+	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 * 2 segments are added since the IOCB needs a command and response bde.
+	 * To ensure that the scsi sgl does not cross a 4k page boundary, only
+	 * sgl sizes that are a power of 2 are used.
+	 */
+	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
+		    (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
+		    sizeof(struct sli4_sge)));
+
+	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
+	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+	switch (sli_family) {
+	case LPFC_SLI_INTF_FAMILY_BE2:
+	case LPFC_SLI_INTF_FAMILY_BE3:
+		/* There is a single hint for BE - 2 pages per BPL. */
+		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
+		    LPFC_SLI_INTF_SLI_HINT1_1)
+			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
+		break;
+	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
+	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
+	default:
+		break;
+	}
+
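+	/*
+	 * Round dma_buf_size up to the smallest power of two (starting at
+	 * LPFC_SLI4_MIN_BUF_SIZE) that holds buf_size, never exceeding
+	 * max_buf_size.  If the cap is reached, the supported segment
+	 * count is recomputed below so the pool entry still fits.
+	 */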
+	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
+	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
+	     dma_buf_size = dma_buf_size << 1)
+		;
+	if (dma_buf_size == max_buf_size)
+		phba->cfg_sg_seg_cnt = (dma_buf_size -
+			sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
+			(2 * sizeof(struct sli4_sge))) /
+				sizeof(struct sli4_sge);
+	phba->cfg_sg_dma_buf_size = dma_buf_size;
+
+	/* Initialize buffer queue management fields */
+	hbq_count = lpfc_sli_hbq_count();
+	for (i = 0; i < hbq_count; ++i)
+		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+	INIT_LIST_HEAD(&phba->rb_pend_list);
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+
+	/*
+	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+	 */
+	/* Initialize the Abort scsi buffer list used by driver */
+	spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	/* This abort list used by worker thread */
+	spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
+
+	/*
+	 * Initialize driver internal slow-path work queues
+	 */
+
+	/* Driver internal slow-path CQ Event pool */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+	/* Response IOCB work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
+	/* Asynchronous event CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+	/* Fast-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+	/* Slow-path XRI aborted CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+	/* Receive queue CQ Event work queue list */
+	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+	/* Initialize extent block lists. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
+	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+
+	/* Initialize the driver internal SLI layer lists. */
+	lpfc_sli_setup(phba);
+	lpfc_sli_queue_setup(phba);
+
+	/* Allocate device driver memory */
+	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+	if (rc)
+		return -ENOMEM;
+
+	/* IF Type 2 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc))
+			return -ENODEV;
+	}
+
+	/* Create the bootstrap mailbox command */
+	rc = lpfc_create_bootstrap_mbox(phba);
+	if (unlikely(rc))
+		goto out_free_mem;
+
+	/* Set up the host's endian order with the device. */
+	rc = lpfc_setup_endian_order(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* Set up the hba's configuration parameters. */
+	rc = lpfc_sli4_read_config(phba);
+	if (unlikely(rc))
+		goto out_free_bsmbx;
+
+	/* IF Type 0 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_0) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc))
+			goto out_free_bsmbx;
+	}
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+	if (!mboxq) {
+		rc = -ENOMEM;
+		goto out_free_bsmbx;
+	}
+
+	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
+	lpfc_supported_pages(mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (!rc) {
+		mqe = &mboxq->u.mqe;
+		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+		       LPFC_MAX_SUPPORTED_PAGES);
+		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+			switch (pn_page[i]) {
+			case LPFC_SLI4_PARAMETERS:
+				phba->sli4_hba.pc_sli4_params.supported = 1;
+				break;
+			default:
+				break;
+			}
+		}
+		/* Read the port's SLI4 Parameters capabilities if supported. */
+		if (phba->sli4_hba.pc_sli4_params.supported)
+			rc = lpfc_pc_sli4_params_get(phba, mboxq);
+		if (rc) {
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			rc = -EIO;
+			goto out_free_bsmbx;
+		}
+	}
+	/*
+	 * Get sli4 parameters that override parameters from Port capabilities.
+	 * If this call fails, it isn't critical unless the SLI4 parameters come
+	 * back in conflict.
+	 */
+	rc = lpfc_get_sli4_parameters(phba, mboxq);
+	if (rc) {
+		if (phba->sli4_hba.extents_in_use &&
+		    phba->sli4_hba.rpi_hdrs_in_use) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2999 Unsupported SLI4 Parameters "
+				"Extents and RPI headers enabled.\n");
+			/* Don't leak the mailbox on this early exit */
+			mempool_free(mboxq, phba->mbox_mem_pool);
+			rc = -EIO;
+			goto out_free_bsmbx;
+		}
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	/* Verify all the SLI4 queues */
+	rc = lpfc_sli4_queue_verify(phba);
+	if (rc)
+		goto out_free_bsmbx;
+
+	/* Create driver internal CQE event pool */
+	rc = lpfc_sli4_cq_event_pool_create(phba);
+	if (rc)
+		goto out_free_bsmbx;
+
+	/* Initialize and populate the iocb list per host */
+	rc = lpfc_init_sgl_list(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1400 Failed to initialize sgl list.\n");
+		goto out_destroy_cq_event_pool;
+	}
+	rc = lpfc_init_active_sgl_array(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1430 Failed to initialize sgl list.\n");
+		goto out_free_sgl_list;
+	}
+	rc = lpfc_sli4_init_rpi_hdrs(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1432 Failed to initialize rpi headers.\n");
+		goto out_free_active_sgl;
+	}
+
+	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
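+	/* One bit per FCF table index, rounded up to whole longs
+	 * (equivalent to BITS_TO_LONGS(LPFC_SLI4_FCF_TBL_INDX_MAX)).
+	 */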
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		rc = -ENOMEM;
+		goto out_remove_rpi_hdrs;
+	}
+
+	/*
+	 * The cfg_fcp_eq_count can be zero whenever there is exactly one
+	 * interrupt vector.  This is not an error.
+	 */
+	if (phba->cfg_fcp_eq_count) {
+		phba->sli4_hba.fcp_eq_hdl =
+				kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+				    phba->cfg_fcp_eq_count), GFP_KERNEL);
+		if (!phba->sli4_hba.fcp_eq_hdl) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2572 Failed allocate memory for "
+					"fast-path per-EQ handle array\n");
+			rc = -ENOMEM;
+			goto out_free_fcf_rr_bmask;
+		}
+	}
+
+	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+				      phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+	if (!phba->sli4_hba.msix_entries) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2573 Failed allocate memory for msi-x "
+				"interrupt vector entries\n");
+		rc = -ENOMEM;
+		goto out_free_fcp_eq_hdl;
+	}
+
+	/*
+	 * Enable sr-iov virtual functions if supported and configured
+	 * through the module parameter.
+	 */
+	if (phba->cfg_sriov_nr_virtfn > 0) {
+		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
+						 phba->cfg_sriov_nr_virtfn);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"3020 Requested number of SR-IOV "
+					"virtual functions (%d) is not "
+					"supported\n",
+					phba->cfg_sriov_nr_virtfn);
+			phba->cfg_sriov_nr_virtfn = 0;
+		}
+	}
+
+	return 0;
+
+out_free_fcp_eq_hdl:
+	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
+out_remove_rpi_hdrs:
+	lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+	lpfc_free_active_sgl(phba);
+out_free_sgl_list:
+	lpfc_free_sgl_list(phba);
+out_destroy_cq_event_pool:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_bsmbx:
+	lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+	lpfc_mem_free(phba);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specific for supporting the SLI-4 HBA device it attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+
+	/* Free memory allocated for msi-x interrupt vector entries */
+	kfree(phba->sli4_hba.msix_entries);
+
+	/* Free memory allocated for fast-path work queue handles */
+	kfree(phba->sli4_hba.fcp_eq_hdl);
+
+	/* Free the allocated rpi headers. */
+	lpfc_sli4_remove_rpi_hdrs(phba);
+	lpfc_sli4_remove_rpis(phba);
+
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
+	/* Free the ELS sgl list */
+	lpfc_free_active_sgl(phba);
+	lpfc_free_sgl_list(phba);
+
+	/* Free the SCSI sgl management array */
+	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+
+	/* Free the completion queue EQ event pool */
+	lpfc_sli4_cq_event_release_all(phba);
+	lpfc_sli4_cq_event_pool_destroy(phba);
+
+	/* Release resource identifiers. */
+	lpfc_sli4_dealloc_resource_identifiers(phba);
+
+	/* Free the bsmbx region. */
+	lpfc_destroy_bootstrap_mbox(phba);
+
+	/* Free the SLI Layer memory with SLI4 HBAs */
+	lpfc_mem_free_all(phba);
+
+	/* Free the current connect table */
+	list_for_each_entry_safe(conn_entry, next_conn_entry,
+		&phba->fcf_conn_rec_list, list) {
+		list_del_init(&conn_entry->list);
+		kfree(conn_entry);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+	phba->lpfc_hba_init_link = lpfc_hba_init_link;
+	phba->lpfc_hba_down_link = lpfc_hba_down_link;
+	phba->lpfc_selective_reset = lpfc_selective_reset;
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+		phba->lpfc_stop_port = lpfc_stop_port_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+		phba->lpfc_stop_port = lpfc_stop_port_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1431 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
+	/*
+	 * Driver resources common to all SLI revisions
+	 */
+	atomic_set(&phba->fast_event_count, 0);
+	spin_lock_init(&phba->hbalock);
+
+	/* Initialize ndlp management spinlock */
+	spin_lock_init(&phba->ndlp_lock);
+
+	INIT_LIST_HEAD(&phba->port_list);
+	INIT_LIST_HEAD(&phba->work_list);
+	init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+	/* Initialize the wait queue head for the kernel thread */
+	init_waitqueue_head(&phba->work_waitq);
+
+	/* Initialize the scsi buffer list used by driver for scsi IO */
+	spin_lock_init(&phba->scsi_buf_list_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+	/* Initialize the fabric iocb list */
+	INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+	/* Initialize list to save ELS buffers */
+	INIT_LIST_HEAD(&phba->elsbuf);
+
+	/* Initialize FCF connection rec list */
+	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+	return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it attached to.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	int error;
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					  "lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device specific resource setup for supporting the HBA device it
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+	/* Stop kernel worker thread */
+	kthread_stop(phba->worker_thread);
+}
+
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+static void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocbq_entry, iocbq_next,
+				 &phba->lpfc_iocb_list, list) {
+		list_del(&iocbq_entry->list);
+		kfree(iocbq_entry);
+		phba->total_iocbq_bufs--;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return;
+}
+
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+	struct lpfc_iocbq *iocbq_entry = NULL;
+	uint16_t iotag;
+	int i;
+
+	/* Initialize and populate the iocb list per host.  */
+	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+	for (i = 0; i < iocb_count; i++) {
+		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+		if (iocbq_entry == NULL) {
+			printk(KERN_ERR "%s: only allocated %d iocbs of "
+				"expected %d count. Unloading driver.\n",
+				__func__, i, iocb_count);
+			goto out_free_iocbq;
+		}
+
+		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
+		if (iotag == 0) {
+			kfree(iocbq_entry);
+			printk(KERN_ERR "%s: failed to allocate IOTAG. "
+				"Unloading driver.\n", __func__);
+			goto out_free_iocbq;
+		}
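+		/* No XRI has been assigned to this iocb yet */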
+		iocbq_entry->sli4_lxritag = NO_XRI;
+		iocbq_entry->sli4_xritag = NO_XRI;
+
+		spin_lock_irq(&phba->hbalock);
+		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+		phba->total_iocbq_bufs++;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	return 0;
+
+out_free_iocbq:
+	lpfc_free_iocb_list(phba);
+
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_free_sgl_list - Free sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's sgl list and memory.
+ **/
+static void
+lpfc_free_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	LIST_HEAD(sglq_list);
+
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(sglq_entry, sglq_next,
+				 &sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+		phba->sli4_hba.total_sglq_bufs--;
+	}
+	kfree(phba->sli4_hba.lpfc_els_sgl_array);
+}
+
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+	int size;
+	size = sizeof(struct lpfc_sglq *);
+	size *= phba->sli4_hba.max_cfg_param.max_xri;
+
+	phba->sli4_hba.lpfc_sglq_active_list =
+		kzalloc(size, GFP_KERNEL);
+	if (!phba->sli4_hba.lpfc_sglq_active_list)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a placeholder for now.
+ **/
+static void
+lpfc_free_active_sgl(struct lpfc_hba *phba)
+{
+	kfree(phba->sli4_hba.lpfc_sglq_active_list);
+}
+
+/**
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
+ *
+ * Return codes
+ *	0 - successful
+ *	other values - error
+ **/
+static int
+lpfc_init_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL;
+	int i;
+	int els_xri_cnt;
+
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2400 ELS XRI count %d.\n",
+				els_xri_cnt);
+	/* Initialize and populate the sglq list per host/VF. */
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	/* Sanity check on XRI management */
+	if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2562 No room left for SCSI XRI allocation: "
+				"max_xri=%d, els_xri=%d\n",
+				phba->sli4_hba.max_cfg_param.max_xri,
+				els_xri_cnt);
+		return -ENOMEM;
+	}
+
+	/* Allocate memory for the ELS XRI management array */
+	phba->sli4_hba.lpfc_els_sgl_array =
+			kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
+			GFP_KERNEL);
+
+	if (!phba->sli4_hba.lpfc_els_sgl_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2401 Failed to allocate memory for ELS "
+				"XRI management array of size %d.\n",
+				els_xri_cnt);
+		return -ENOMEM;
+	}
+
+	/* XRIs not reserved for ELS are available for SCSI I/O; size the
+	 * SCSI buffer management array accordingly. */
+	phba->sli4_hba.scsi_xri_max =
+			phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	phba->sli4_hba.scsi_xri_cnt = 0;
+	phba->sli4_hba.lpfc_scsi_psb_array =
+			kzalloc((sizeof(struct lpfc_scsi_buf *) *
+			phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
+
+	if (!phba->sli4_hba.lpfc_scsi_psb_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2563 Failed to allocate memory for SCSI "
+				"XRI management array of size %d.\n",
+				phba->sli4_hba.scsi_xri_max);
+		kfree(phba->sli4_hba.lpfc_els_sgl_array);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < els_xri_cnt; i++) {
+		sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
+		if (sglq_entry == NULL) {
+			printk(KERN_ERR "%s: only allocated %d sgls of "
+				"expected %d count. Unloading driver.\n",
+				__func__, i, els_xri_cnt);
+			goto out_free_mem;
+		}
+
+		sglq_entry->buff_type = GEN_BUFF_TYPE;
+		sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
+		if (sglq_entry->virt == NULL) {
+			kfree(sglq_entry);
+			printk(KERN_ERR "%s: failed to allocate mbuf.\n"
+				"Unloading driver.\n", __func__);
+			goto out_free_mem;
+		}
+		sglq_entry->sgl = sglq_entry->virt;
+		memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+
+		/* The list order is used by later block SGL registration */
+		spin_lock_irq(&phba->hbalock);
+		sglq_entry->state = SGL_FREED;
+		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
+		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
+		phba->sli4_hba.total_sglq_bufs++;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return 0;
+
+out_free_mem:
+	kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+	lpfc_free_sgl_list(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * port for those SLI4 ports that do not support extents.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
+ * and should be called only when interrupts are disabled.
+ *
+ * Return codes
+ * 	0 - successful
+ *	-ERROR - otherwise.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+	int rc = 0;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return rc;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+	if (!rpi_hdr) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0391 Error during rpi post operation\n");
+		lpfc_sli4_remove_rpis(phba);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores it in the phba's rpi header list.  This single region
+ * provides support for up to 64 rpis.  The region is used globally
+ * by the device.
+ *
+ * Returns:
+ *   A valid rpi hdr on success.
+ *   A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+	uint16_t rpi_limit, curr_rpi_range;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_rpi_hdr *rpi_hdr;
+	uint32_t rpi_count;
+
+	/*
+	 * If the SLI4 port supports extents, posting the rpi header isn't
+	 * required.  Set the expected maximum count and let the actual value
+	 * get set when extents are fully allocated.
+	 */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return NULL;
+	if (phba->sli4_hba.extents_in_use)
+		return NULL;
+
+	/* The limit on the logical index is just the max_rpi count. */
+	rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+		    phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+	spin_lock_irq(&phba->hbalock);
+	/*
+	 * Establish the starting RPI in this header block.  The starting
+	 * rpi is normalized to a zero base because the physical rpi is
+	 * port based.
+	 */
+	curr_rpi_range = phba->sli4_hba.next_rpi;
+	spin_unlock_irq(&phba->hbalock);
+
+	/*
+	 * The port has a limited number of rpis. The increment here
+	 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
+	 * and to allow the full max_rpi range per port.
+	 */
+	if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+		rpi_count = rpi_limit - curr_rpi_range;
+	else
+		rpi_count = LPFC_RPI_HDR_COUNT;
+
+	if (!rpi_count)
+		return NULL;
+	/*
+	 * First allocate the protocol header region for the port.  The
+	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+	 */
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return NULL;
+
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  LPFC_HDR_TEMPLATE_SIZE,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		rpi_hdr = NULL;
+		goto err_free_dmabuf;
+	}
+
+	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
+	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+		rpi_hdr = NULL;
+		goto err_free_coherent;
+	}
+
+	/* Save the rpi header data for cleanup later. */
+	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+	if (!rpi_hdr)
+		goto err_free_coherent;
+
+	rpi_hdr->dmabuf = dmabuf;
+	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+	rpi_hdr->page_count = 1;
+	spin_lock_irq(&phba->hbalock);
+
+	/* The rpi_hdr stores the logical index only. */
+	rpi_hdr->start_rpi = curr_rpi_range;
+	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+	/*
+	 * The next_rpi stores the next logical module-64 rpi value used
+	 * to post physical rpis in subsequent rpi postings.
+	 */
+	phba->sli4_hba.next_rpi += rpi_count;
+	spin_unlock_irq(&phba->hbalock);
+	return rpi_hdr;
+
+ err_free_coherent:
+	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+			  dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+	kfree(dmabuf);
+	return NULL;
+}
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis for SLI4 ports not supporting extents. This routine
+ * presumes the caller has released all rpis consumed by fabric or port
+ * logins and is prepared to have the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		goto exit;
+
+	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		list_del(&rpi_hdr->list);
+		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+				  rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+		kfree(rpi_hdr->dmabuf);
+		kfree(rpi_hdr);
+	}
+ exit:
+	/* There are no rpis available to the port now. */
+	phba->sli4_hba.next_rpi = 0;
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *      pointer to @phba - successful
+ *      NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+	struct lpfc_hba *phba;
+
+	/* Allocate memory for HBA structure */
+	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+	if (!phba) {
+		dev_err(&pdev->dev, "failed to allocate hba struct\n");
+		return NULL;
+	}
+
+	/* Set reference to PCI device in HBA structure */
+	phba->pcidev = pdev;
+
+	/* Assign an unused board number */
+	phba->brd_no = lpfc_get_instance();
+	if (phba->brd_no < 0) {
+		kfree(phba);
+		return NULL;
+	}
+
+	spin_lock_init(&phba->ct_ev_lock);
+	INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
+	return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+	/* Release the driver assigned board number */
+	idr_remove(&lpfc_hba_index, phba->brd_no);
+
+	kfree(phba);
+	return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport;
+	struct Scsi_Host  *shost;
+
+	/* Initialize HBA FC structure */
+	phba->fc_edtov = FF_DEF_EDTOV;
+	phba->fc_ratov = FF_DEF_RATOV;
+	phba->fc_altov = FF_DEF_ALTOV;
+	phba->fc_arbtov = FF_DEF_ARBTOV;
+
+	atomic_set(&phba->sdev_cnt, 0);
+	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+	if (!vport)
+		return -ENODEV;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba->pport = vport;
+	lpfc_debugfs_initialize(vport);
+	/* Put reference to SCSI host to driver's device private data */
+	pci_set_drvdata(phba->pcidev, shost);
+
+	return 0;
+}
+
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+
+	/* Destroy physical port that associated with the SCSI host */
+	destroy_port(vport);
+
+	return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+	int pagecnt = 10;
+	if (lpfc_prot_mask && lpfc_prot_guard) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"1478 Registering BlockGuard with the "
+				"SCSI layer\n");
+		scsi_host_set_prot(shost, lpfc_prot_mask);
+		scsi_host_set_guard(shost, lpfc_prot_guard);
+	}
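+	/*
+	 * Allocate the BlockGuard debug dump buffers.  Start by asking for
+	 * 2^pagecnt pages and halve the request (decrement the order) on
+	 * each failure until an allocation succeeds or the order hits zero.
+	 */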
+	if (!_dump_buf_data) {
+		while (pagecnt) {
+			spin_lock_init(&_dump_buf_lock);
+			_dump_buf_data =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_data) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9043 BLKGRD: allocated %d pages for "
+				       "_dump_buf_data at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_data);
+				_dump_buf_data_order = pagecnt;
+				memset(_dump_buf_data, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_data_order)
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9044 BLKGRD: ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
+		       "\n", _dump_buf_data);
+	if (!_dump_buf_dif) {
+		while (pagecnt) {
+			_dump_buf_dif =
+				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
+			if (_dump_buf_dif) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9046 BLKGRD: allocated %d pages for "
+				       "_dump_buf_dif at 0x%p\n",
+				       (1 << pagecnt), _dump_buf_dif);
+				_dump_buf_dif_order = pagecnt;
+				memset(_dump_buf_dif, 0,
+				       ((1 << PAGE_SHIFT) << pagecnt));
+				break;
+			} else
+				--pagecnt;
+		}
+		if (!_dump_buf_dif_order)
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9047 BLKGRD: ERROR unable to allocate "
+			       "memory for hexdump\n");
+	} else
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
+		       _dump_buf_dif);
+}
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+	struct Scsi_Host  *shost;
+	struct lpfc_adapter_event_header adapter_event;
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/*
+	 * hba setup may have changed the hba_queue_depth so we need to
+	 * adjust the value of can_queue.
+	 */
+	shost = pci_get_drvdata(phba->pcidev);
+	shost->can_queue = phba->cfg_hba_queue_depth - 10;
+	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+		lpfc_setup_bg(phba, shost);
+
+	lpfc_host_attrib_init(shost);
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		spin_lock_irq(shost->host_lock);
+		lpfc_poll_start_timer(phba);
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0428 Perform SCSI scan\n");
+	/* Send board arrival event to upper layer */
+	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+				  sizeof(adapter_event),
+				  (char *) &adapter_event,
+				  LPFC_NL_VENDOR_ID);
+	return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-3 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar2map_len;
+	int i, hbq_count;
+	void *ptr;
+	int error = -ENODEV;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			return error;
+		}
+	}
+
+	/* Get the bus address of Bar0 and Bar2 and the number of bytes
+	 * required by each mapping.
+	 */
+	phba->pci_bar0_map = pci_resource_start(pdev, 0);
+	bar0map_len = pci_resource_len(pdev, 0);
+
+	phba->pci_bar2_map = pci_resource_start(pdev, 2);
+	bar2map_len = pci_resource_len(pdev, 2);
+
+	/* Map HBA SLIM to a kernel virtual address. */
+	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+	if (!phba->slim_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLIM memory.\n");
+		goto out;
+	}
+
+	/* Map HBA Control Registers to a kernel virtual address. */
+	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+	if (!phba->ctrl_regs_memmap_p) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for HBA control registers.\n");
+		goto out_iounmap_slim;
+	}
+
+	/* Allocate memory for SLI-2 structures */
+	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
+					       SLI2_SLIM_SIZE,
+					       &phba->slim2p.phys,
+					       GFP_KERNEL);
+	if (!phba->slim2p.virt)
+		goto out_iounmap;
+
+	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+	phba->mbox_ext = (phba->slim2p.virt +
+		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
+	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+	phba->IOCBs = (phba->slim2p.virt +
+		       offsetof(struct lpfc_sli2_slim, IOCBs));
+
+	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+						 lpfc_sli_hbq_size(),
+						 &phba->hbqslimp.phys,
+						 GFP_KERNEL);
+	if (!phba->hbqslimp.virt)
+		goto out_free_slim;
+
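+	/* Carve the HBQ SLIM region into per-HBQ arrays of hbq entries */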
+	hbq_count = lpfc_sli_hbq_count();
+	ptr = phba->hbqslimp.virt;
+	for (i = 0; i < hbq_count; ++i) {
+		phba->hbqs[i].hbq_virt = ptr;
+		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+		ptr += (lpfc_hbq_defs[i]->entry_count *
+			sizeof(struct lpfc_hbq_entry));
+	}
+	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+	INIT_LIST_HEAD(&phba->rb_pend_list);
+
+	phba->MBslimaddr = phba->slim_memmap_p;
+	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+	return 0;
+
+out_free_slim:
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+	iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+	iounmap(phba->slim_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return;
+	else
+		pdev = phba->pcidev;
+
+	/* Free coherent DMA memory allocated */
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* I/O memory unmap */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
+ * done and check status.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
+{
+	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
+	struct lpfc_register reg_data;
+	int i, port_error = 0;
+	uint32_t if_type;
+
+	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
+	memset(&reg_data, 0, sizeof(reg_data));
+	if (!phba->sli4_hba.PSMPHRregaddr)
+		return -ENODEV;
+
+	/* Wait up to 30 seconds for the SLI Port POST done and ready */
+	for (i = 0; i < 3000; i++) {
+		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr_reg.word0) ||
+			(bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
+			/* Port has a fatal POST error, break out */
+			port_error = -ENODEV;
+			break;
+		}
+		if (LPFC_POST_STAGE_PORT_READY ==
+		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
+			break;
+		msleep(10);
+	}
+
+	/*
+	 * If there was a port error during POST, then don't proceed with
+	 * other register reads as the data may not be valid.  Just exit.
+	 */
+	if (port_error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"1408 Port Failed POST - portsmphr=0x%x, "
+			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
+			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
+			portsmphr_reg.word0,
+			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2534 Device Info: SLIFamily=0x%x, "
+				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
+				"SLIHint_2=0x%x, FT=0x%x\n",
+				bf_get(lpfc_sli_intf_sli_family,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_slirev,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_if_type,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_sli_hint1,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_sli_hint2,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_func_type,
+				       &phba->sli4_hba.sli_intf));
+		/*
+		 * Check for other Port errors during the initialization
+		 * process.  Fail the load if the port did not come up
+		 * correctly.
+		 */
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		switch (if_type) {
+		case LPFC_SLI_INTF_IF_TYPE_0:
+			phba->sli4_hba.ue_mask_lo =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
+			phba->sli4_hba.ue_mask_hi =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
+			uerrlo_reg.word0 =
+			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+			uerrhi_reg.word0 =
+				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"1422 Unrecoverable Error "
+						"Detected during POST "
+						"uerr_lo_reg=0x%x, "
+						"uerr_hi_reg=0x%x, "
+						"ue_mask_lo_reg=0x%x, "
+						"ue_mask_hi_reg=0x%x\n",
+						uerrlo_reg.word0,
+						uerrhi_reg.word0,
+						phba->sli4_hba.ue_mask_lo,
+						phba->sli4_hba.ue_mask_hi);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_2:
+			/* Final checks.  The port status should be clean. */
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+				&reg_data.word0) ||
+				(bf_get(lpfc_sliport_status_err, &reg_data) &&
+				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
+				phba->work_status[0] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR1regaddr);
+				phba->work_status[1] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR2regaddr);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2888 Unrecoverable port error "
+					"following POST: port status reg "
+					"0x%x, port_smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					portsmphr_reg.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_1:
+		default:
+			break;
+		}
+	}
+	return port_error;
+}
+
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @if_type:  The SLI4 interface type getting configured.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
+{
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		phba->sli4_hba.u.if_type0.UERRLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
+		phba->sli4_hba.u.if_type0.UERRHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
+		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
+		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		phba->sli4_hba.u.if_type2.ERR1regaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_ER1_OFFSET;
+		phba->sli4_hba.u.if_type2.ERR2regaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_ER2_OFFSET;
+		phba->sli4_hba.u.if_type2.CTRLregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_CTL_OFFSET;
+		phba->sli4_hba.u.if_type2.STATUSregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_STA_OFFSET;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		phba->sli4_hba.PSMPHRregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+						LPFC_CTL_PORT_SEM_OFFSET;
+		phba->sli4_hba.RQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+		phba->sli4_hba.WQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+		phba->sli4_hba.EQCQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
+		phba->sli4_hba.MQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
+		phba->sli4_hba.BMBXregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
+	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_SLIPORT_IF0_SMPHR;
+	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_ISR0;
+	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_IMR0;
+	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_HST_ISCR0;
+}
+
+/**
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
+ *
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
+{
+	if (vf > LPFC_VIR_FUNC_MAX)
+		return -ENODEV;
+
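+	/*
+	 * Each virtual function owns its own page of doorbell registers.
+	 * For example (illustrative), with vf == 2 the RQ doorbell resolves
+	 * to drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL.
+	 */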
+	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+	return 0;
+}
+
+/**
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec.  This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs.  No locks are expected to be held when calling
+ * this routine.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
+{
+	uint32_t bmbx_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address *dma_address;
+	uint32_t pa_addr;
+	uint64_t phys_addr;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	/*
+	 * The bootstrap mailbox region is comprised of 2 parts
+	 * plus an alignment restriction of 16 bytes.
+	 */
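+	/*
+	 * The extra (LPFC_ALIGN_16_BYTE - 1) bytes of slack in the size below
+	 * let PTR_ALIGN()/ALIGN() pick a 16-byte aligned mailbox start
+	 * anywhere within the allocation.
+	 */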
+	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  bmbx_size,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
+	}
+	memset(dmabuf->virt, 0, bmbx_size);
+
+	/*
+	 * Initialize the bootstrap mailbox pointers now so that the register
+	 * operations are simple later.  The mailbox dma address is required
+	 * to be 16-byte aligned.  Also align the virtual memory as each
+	 * mailbox is copied into the bmbx mailbox region before issuing the
+	 * command to the port.
+	 */
+	phba->sli4_hba.bmbx.dmabuf = dmabuf;
+	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+					      LPFC_ALIGN_16_BYTE);
+	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+					      LPFC_ALIGN_16_BYTE);
+
+	/*
+	 * Set the high and low physical addresses now.  The SLI4 alignment
+	 * requirement is 16 bytes and the mailbox is posted to the port
+	 * as two 30-bit addresses.  The other data is a bit marking whether
+	 * the 30-bit address is the high or low address.
+	 * Upcast bmbx aphys to 64 bits so the shift instruction compiles
+	 * cleanly on 32-bit machines.
+	 */
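+	/*
+	 * Illustration of the split below: addr_hi carries physical address
+	 * bits 63:34 and addr_lo carries bits 33:4; bits 3:0 are dropped as
+	 * they are guaranteed zero by the 16-byte alignment.
+	 */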
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_HI);
+
+	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+					   LPFC_BMBX_BIT1_ADDR_LO);
+	return 0;
+}
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to tear down the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure that all outstanding mailbox commands have
+ * completed, that no additional mailbox commands are sent, and that
+ * interrupts are disabled before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+	dma_free_coherent(&phba->pcidev->dev,
+			  phba->sli4_hba.bmbx.bmbx_size,
+			  phba->sli4_hba.bmbx.dmabuf->virt,
+			  phba->sli4_hba.bmbx.dmabuf->phys);
+
+	kfree(phba->sli4_hba.bmbx.dmabuf);
+	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb;
+	struct lpfc_mbx_read_config *rd_config;
+	union  lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+	struct lpfc_mbx_get_func_cfg *get_func_cfg;
+	struct lpfc_rsrc_desc_fcfcoe *desc;
+	uint32_t desc_count;
+	int length, i, rc = 0;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2011 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
+	}
+
+	lpfc_read_config(phba, pmb);
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2012 Mailbox failed, mbxCmd x%x "
+			"READ_CONFIG, mbxStatus x%x\n",
+			bf_get(lpfc_mqe_command, &pmb->u.mqe),
+			bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		rc = -EIO;
+	} else {
+		rd_config = &pmb->u.mqe.un.rd_config;
+		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+			phba->sli4_hba.lnk_info.lnk_tp =
+				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+			phba->sli4_hba.lnk_info.lnk_no =
+				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"3081 lnk_type:%d, lnk_numb:%d\n",
+					phba->sli4_hba.lnk_info.lnk_tp,
+					phba->sli4_hba.lnk_info.lnk_no);
+		} else
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"3082 Mailbox (x%x) returned ldv:x0\n",
+					bf_get(lpfc_mqe_command, &pmb->u.mqe));
+		phba->sli4_hba.extents_in_use =
+			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+		phba->sli4_hba.max_cfg_param.max_xri =
+			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+		phba->sli4_hba.max_cfg_param.xri_base =
+			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vpi =
+			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.vpi_base =
+			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rpi =
+			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.rpi_base =
+			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_vfi =
+			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.vfi_base =
+			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+		phba->sli4_hba.max_cfg_param.max_fcfi =
+			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_eq =
+			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_rq =
+			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_wq =
+			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+		phba->sli4_hba.max_cfg_param.max_cq =
+			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
+				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
+		phba->max_vports = phba->max_vpi;
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2003 cfg params Extents? %d "
+				"XRI(B:%d M:%d), "
+				"VPI(B:%d M:%d) "
+				"VFI(B:%d M:%d) "
+				"RPI(B:%d M:%d) "
+				"FCFI(Count:%d)\n",
+				phba->sli4_hba.extents_in_use,
+				phba->sli4_hba.max_cfg_param.xri_base,
+				phba->sli4_hba.max_cfg_param.max_xri,
+				phba->sli4_hba.max_cfg_param.vpi_base,
+				phba->sli4_hba.max_cfg_param.max_vpi,
+				phba->sli4_hba.max_cfg_param.vfi_base,
+				phba->sli4_hba.max_cfg_param.max_vfi,
+				phba->sli4_hba.max_cfg_param.rpi_base,
+				phba->sli4_hba.max_cfg_param.max_rpi,
+				phba->sli4_hba.max_cfg_param.max_fcfi);
+	}
+
+	if (rc)
+		goto read_cfg_out;
+
+	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
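+	/*
+	 * Cap the depth so that enough XRIs remain reserved for ELS traffic
+	 * (lpfc_sli4_get_els_iocb_cnt() worth of IOCBs).
+	 */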
+	if (phba->cfg_hba_queue_depth >
+		(phba->sli4_hba.max_cfg_param.max_xri -
+			lpfc_sli4_get_els_iocb_cnt(phba)))
+		phba->cfg_hba_queue_depth =
+			phba->sli4_hba.max_cfg_param.max_xri -
+				lpfc_sli4_get_els_iocb_cnt(phba);
+
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_IF_TYPE_2)
+		goto read_cfg_out;
+
+	/* get the pf# and vf# for SLI4 if_type 2 port */
+	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc || shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3026 Mailbox failed, mbxCmd x%x "
+				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
+				bf_get(lpfc_mqe_command, &pmb->u.mqe),
+				bf_get(lpfc_mqe_status, &pmb->u.mqe));
+		rc = -EIO;
+		goto read_cfg_out;
+	}
+
+	/* Search for the fc_fcoe resource descriptor */
+	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
+	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
+
+	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
+		desc = (struct lpfc_rsrc_desc_fcfcoe *)
+			&get_func_cfg->func_cfg.desc[i];
+		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
+		    bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
+			phba->sli4_hba.iov.pf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
+			phba->sli4_hba.iov.vf_number =
+				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
+			break;
+		}
+	}
+
+	if (i < LPFC_RSRC_DESC_MAX_NUM)
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
+				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
+				phba->sli4_hba.iov.vf_number);
+	else {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3028 GET_FUNCTION_CONFIG: failed to find "
+				"Resource Descriptor:x%x\n",
+				LPFC_RSRC_DESC_TYPE_FCFCOE);
+		rc = -EIO;
+	}
+
+read_cfg_out:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the port-side endian order when
+ * the port if_type is 0.  This routine has no function for other
+ * if_types.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t if_type, rc = 0;
+	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+				      HOST_ENDIAN_HIGH_WORD1};
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0492 Unable to allocate memory for "
+					"issuing SLI_CONFIG_SPECIAL mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
+
+		/*
+		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
+		 * two words to contain special data values and no other data.
+		 */
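+		/*
+		 * These known constants are what let the port deduce the
+		 * host byte order from how the words appear in the mailbox.
+		 */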
+		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0493 SLI_CONFIG_SPECIAL mailbox "
+					"failed with status x%x\n",
+					rc);
+			rc = -EIO;
+		}
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to check the user-settable queue counts for EQs and
+ * CQs. After this routine is called, the counts will be set to valid values that
+ * adhere to the constraints of the system's interrupt vectors and the port's
+ * queue resources.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_queue_verify(struct lpfc_hba *phba)
+{
+	int cfg_fcp_wq_count;
+	int cfg_fcp_eq_count;
+
+	/*
+	 * Sanity check for configured queue parameters against the run-time
+	 * device parameters
+	 */
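+	/*
+	 * For example (illustrative): if the function exposes max_wq work
+	 * queues and LPFC_SP_WQN_DEF of them are reserved for slow-path use,
+	 * at most (max_wq - LPFC_SP_WQN_DEF) fast-path FCP WQs can be
+	 * configured; larger requests are clamped below.
+	 */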
+
+	/* Sanity check on FCP fast-path WQ parameters */
+	cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
+	if (cfg_fcp_wq_count >
+	    (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
+		cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
+				   LPFC_SP_WQN_DEF;
+		if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2581 Not enough WQs (%d) from "
+					"the pci function for supporting "
+					"FCP WQs (%d)\n",
+					phba->sli4_hba.max_cfg_param.max_wq,
+					phba->cfg_fcp_wq_count);
+			goto out_error;
+		}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2582 Not enough WQs (%d) from the pci "
+				"function for supporting the requested "
+				"FCP WQs (%d), the actual FCP WQs can "
+				"be supported: %d\n",
+				phba->sli4_hba.max_cfg_param.max_wq,
+				phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
+	}
+	/* The actual number of FCP work queues adopted */
+	phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
+
+	/* Sanity check on FCP fast-path EQ parameters */
+	cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
+	if (cfg_fcp_eq_count >
+	    (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
+		cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
+				   LPFC_SP_EQN_DEF;
+		if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2574 Not enough EQs (%d) from the "
+					"pci function for supporting FCP "
+					"EQs (%d)\n",
+					phba->sli4_hba.max_cfg_param.max_eq,
+					phba->cfg_fcp_eq_count);
+			goto out_error;
+		}
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2575 Not enough EQs (%d) from the pci "
+				"function for supporting the requested "
+				"FCP EQs (%d), the actual FCP EQs can "
+				"be supported: %d\n",
+				phba->sli4_hba.max_cfg_param.max_eq,
+				phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
+	}
+	/* It does not make sense to have more EQs than WQs */
+	if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2593 The FCP EQ count(%d) cannot be greater "
+				"than the FCP WQ count(%d), limiting the "
+				"FCP EQ count to %d\n", cfg_fcp_eq_count,
+				phba->cfg_fcp_wq_count,
+				phba->cfg_fcp_wq_count);
+		cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+	}
+	/* The actual number of FCP event queues adopted */
+	phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
+	/* The overall number of event queues used */
+	phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
+
+	/* Get EQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+
+	/* Get CQ depth from module parameter, fake the default for now */
+	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+	return 0;
+out_error:
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *qdesc;
+	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+
+	/*
+	 * Create Event Queues (EQs)
+	 */
+
+	/* Create slow path event queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+				      phba->sli4_hba.eq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0496 Failed allocate slow-path EQ\n");
+		goto out_error;
+	}
+	phba->sli4_hba.sp_eq = qdesc;
+
+	/*
+	 * Create fast-path FCP Event Queue(s).  The cfg_fcp_eq_count can be
+	 * zero whenever there is exactly one interrupt vector.  This is not
+	 * an error.
+	 */
+	if (phba->cfg_fcp_eq_count) {
+		phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+				       phba->cfg_fcp_eq_count), GFP_KERNEL);
+		if (!phba->sli4_hba.fp_eq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2576 Failed allocate memory for "
+					"fast-path EQ record array\n");
+			goto out_free_sp_eq;
+		}
+	}
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+					      phba->sli4_hba.eq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0497 Failed allocate fast-path EQ\n");
+			goto out_free_fp_eq;
+		}
+		phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+	}
+
+	/*
+	 * Create Complete Queues (CQs)
+	 */
+
+	/* Create slow-path Mailbox Command Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0500 Failed allocate slow-path mailbox CQ\n");
+		goto out_free_fp_eq;
+	}
+	phba->sli4_hba.mbx_cq = qdesc;
+
+	/* Create slow-path ELS Complete Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+				      phba->sli4_hba.cq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0501 Failed allocate slow-path ELS CQ\n");
+		goto out_free_mbx_cq;
+	}
+	phba->sli4_hba.els_cq = qdesc;
+
+
+	/*
+	 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
+	 * If there are no FCP EQs then create exactly one FCP CQ.
+	 */
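+	/*
+	 * The do/while loop below always executes at least once, which is
+	 * what guarantees the single FCP CQ when cfg_fcp_eq_count is zero.
+	 */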
+	if (phba->cfg_fcp_eq_count)
+		phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+						 phba->cfg_fcp_eq_count),
+						GFP_KERNEL);
+	else
+		phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
+						GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2577 Failed allocate memory for fast-path "
+				"CQ record array\n");
+		goto out_free_els_cq;
+	}
+	fcp_cqidx = 0;
+	do {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0499 Failed allocate fast-path FCP "
+					"CQ (%d)\n", fcp_cqidx);
+			goto out_free_fcp_cq;
+		}
+		phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
+	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+
+	/* Create Mailbox Command Queue */
+	phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+	phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+				      phba->sli4_hba.mq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0505 Failed allocate slow-path MQ\n");
+		goto out_free_fcp_cq;
+	}
+	phba->sli4_hba.mbx_wq = qdesc;
+
+	/*
+	 * Create all the Work Queues (WQs)
+	 */
+	phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+	phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+
+	/* Create slow-path ELS Work Queue */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+				      phba->sli4_hba.wq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0504 Failed allocate slow-path ELS WQ\n");
+		goto out_free_mbx_wq;
+	}
+	phba->sli4_hba.els_wq = qdesc;
+
+	/* Create fast-path FCP Work Queue(s) */
+	phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+				phba->cfg_fcp_wq_count), GFP_KERNEL);
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2578 Failed allocate memory for fast-path "
+				"WQ record array\n");
+		goto out_free_els_wq;
+	}
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
+		if (!qdesc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0503 Failed allocate fast-path FCP "
+					"WQ (%d)\n", fcp_wqidx);
+			goto out_free_fcp_wq;
+		}
+		phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
+	}
+
+	/*
+	 * Create Receive Queue (RQ)
+	 */
+	phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+	phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+	/* Create Receive Queue for header */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0506 Failed allocate receive HRQ\n");
+		goto out_free_fcp_wq;
+	}
+	phba->sli4_hba.hdr_rq = qdesc;
+
+	/* Create Receive Queue for data */
+	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+				      phba->sli4_hba.rq_ecount);
+	if (!qdesc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0507 Failed allocate receive DRQ\n");
+		goto out_free_hdr_rq;
+	}
+	phba->sli4_hba.dat_rq = qdesc;
+
+	return 0;
+
+out_free_hdr_rq:
+	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+	phba->sli4_hba.hdr_rq = NULL;
+out_free_fcp_wq:
+	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
+out_free_els_wq:
+	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+	phba->sli4_hba.els_wq = NULL;
+out_free_mbx_wq:
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+	phba->sli4_hba.mbx_wq = NULL;
+out_free_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
+out_free_els_cq:
+	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+	phba->sli4_hba.els_cq = NULL;
+out_free_mbx_cq:
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+	phba->sli4_hba.mbx_cq = NULL;
+out_free_fp_eq:
+	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
+		lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
+		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
+	}
+	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
+out_free_sp_eq:
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+out_error:
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues with the FCoE HBA
+ * operation.
+ **/
+void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Release mailbox command work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+	phba->sli4_hba.mbx_wq = NULL;
+
+	/* Release ELS work queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+	phba->sli4_hba.els_wq = NULL;
+
+	/* Release FCP work queue */
+	if (phba->sli4_hba.fcp_wq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+	kfree(phba->sli4_hba.fcp_wq);
+	phba->sli4_hba.fcp_wq = NULL;
+
+	/* Release unsolicited receive queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+	phba->sli4_hba.hdr_rq = NULL;
+	lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+	phba->sli4_hba.dat_rq = NULL;
+
+	/* Release ELS complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+	phba->sli4_hba.els_cq = NULL;
+
+	/* Release mailbox command complete queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+	phba->sli4_hba.mbx_cq = NULL;
+
+	/* Release FCP response complete queue */
+	fcp_qidx = 0;
+	if (phba->sli4_hba.fcp_cq != NULL)
+		do
+			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+		while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	kfree(phba->sli4_hba.fcp_cq);
+	phba->sli4_hba.fcp_cq = NULL;
+
+	/* Release fast-path event queue */
+	if (phba->sli4_hba.fp_eq != NULL)
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+	kfree(phba->sli4_hba.fp_eq);
+	phba->sli4_hba.fp_eq = NULL;
+
+	/* Release slow-path event queue */
+	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+	phba->sli4_hba.sp_eq = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+	int rc = -ENOMEM;
+	int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+	int fcp_cq_index = 0;
+
+	/*
+	 * Set up Event Queues (EQs)
+	 */
+
+	/* Set up slow-path event queue */
+	if (!phba->sli4_hba.sp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0520 Slow-path EQ not allocated\n");
+		goto out_error;
+	}
+	rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
+			    LPFC_SP_DEF_IMAX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0521 Failed setup of slow-path EQ: "
+				"rc = 0x%x\n", rc);
+		goto out_error;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2583 Slow-path EQ setup: queue-id=%d\n",
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up fast-path event queue */
+	if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3147 Fast-path EQs not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_sp_eq;
+	}
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0522 Fast-path EQ (%d) not "
+					"allocated\n", fcp_eqidx);
+			rc = -ENOMEM;
+			goto out_destroy_fp_eq;
+		}
+		rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
+				    phba->cfg_fcp_imax);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0523 Failed setup of fast-path EQ "
+					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
+			goto out_destroy_fp_eq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2584 Fast-path EQ setup: "
+				"queue[%d]-id=%d\n", fcp_eqidx,
+				phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+	}
+
+	/*
+	 * Set up Complete Queues (CQs)
+	 */
+
+	/* Set up slow-path MBOX Complete Queue as the first CQ */
+	if (!phba->sli4_hba.mbx_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0528 Mailbox CQ not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_fp_eq;
+	}
+	rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
+			    LPFC_MCQ, LPFC_MBOX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0529 Failed setup of slow-path mailbox CQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fp_eq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
+			phba->sli4_hba.mbx_cq->queue_id,
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up slow-path ELS Complete Queue */
+	if (!phba->sli4_hba.els_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0530 ELS CQ not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_mbx_cq;
+	}
+	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
+			    LPFC_WCQ, LPFC_ELS);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0531 Failed setup of slow-path ELS CQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_mbx_cq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
+			phba->sli4_hba.els_cq->queue_id,
+			phba->sli4_hba.sp_eq->queue_id);
+
+	/* Set up fast-path FCP Response Complete Queue */
+	if (!phba->sli4_hba.fcp_cq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3148 Fast-path FCP CQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_cq;
+	}
+	fcp_cqidx = 0;
+	do {
+		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0526 Fast-path FCP CQ (%d) not "
+					"allocated\n", fcp_cqidx);
+			rc = -ENOMEM;
+			goto out_destroy_fcp_cq;
+		}
+		if (phba->cfg_fcp_eq_count)
+			rc = lpfc_cq_create(phba,
+					    phba->sli4_hba.fcp_cq[fcp_cqidx],
+					    phba->sli4_hba.fp_eq[fcp_cqidx],
+					    LPFC_WCQ, LPFC_FCP);
+		else
+			rc = lpfc_cq_create(phba,
+					    phba->sli4_hba.fcp_cq[fcp_cqidx],
+					    phba->sli4_hba.sp_eq,
+					    LPFC_WCQ, LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0527 Failed setup of fast-path FCP "
+					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+			goto out_destroy_fcp_cq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2588 FCP CQ setup: cq[%d]-id=%d, "
+				"parent %seq[%d]-id=%d\n",
+				fcp_cqidx,
+				phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+				(phba->cfg_fcp_eq_count) ? "" : "sp_",
+				fcp_cqidx,
+				(phba->cfg_fcp_eq_count) ?
+				   phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
+				   phba->sli4_hba.sp_eq->queue_id);
+	} while (++fcp_cqidx < phba->cfg_fcp_eq_count);
+
+	/*
+	 * Set up all the Work Queues (WQs)
+	 */
+
+	/* Set up Mailbox Command Queue */
+	if (!phba->sli4_hba.mbx_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0538 Slow-path MQ not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_fcp_cq;
+	}
+	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0539 Failed setup of slow-path MQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fcp_cq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.mbx_wq->queue_id,
+			phba->sli4_hba.mbx_cq->queue_id);
+
+	/* Set up slow-path ELS Work Queue */
+	if (!phba->sli4_hba.els_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0536 Slow-path ELS WQ not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_mbx_wq;
+	}
+	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
+			    phba->sli4_hba.els_cq, LPFC_ELS);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0537 Failed setup of slow-path ELS WQ: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_mbx_wq;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+			phba->sli4_hba.els_wq->queue_id,
+			phba->sli4_hba.els_cq->queue_id);
+
+	/* Set up fast-path FCP Work Queue */
+	if (!phba->sli4_hba.fcp_wq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3149 Fast-path FCP WQ array not "
+				"allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_els_wq;
+	}
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0534 Fast-path FCP WQ (%d) not "
+					"allocated\n", fcp_wqidx);
+			rc = -ENOMEM;
+			goto out_destroy_fcp_wq;
+		}
+		rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+				    phba->sli4_hba.fcp_cq[fcp_cq_index],
+				    LPFC_FCP);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0535 Failed setup of fast-path FCP "
+					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+			goto out_destroy_fcp_wq;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2591 FCP WQ setup: wq[%d]-id=%d, "
+				"parent cq[%d]-id=%d\n",
+				fcp_wqidx,
+				phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+				fcp_cq_index,
+				phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
+		/* Round robin FCP Work Queue's Completion Queue assignment */
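+		/*
+		 * For example (illustrative), with cfg_fcp_eq_count == 4 and
+		 * eight FCP WQs, successive WQs bind to CQs 0, 1, 2, 3,
+		 * 0, 1, 2, 3.
+		 */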
+		if (phba->cfg_fcp_eq_count)
+			fcp_cq_index = ((fcp_cq_index + 1) %
+					phba->cfg_fcp_eq_count);
+	}
+
+	/*
+	 * Create Receive Queue (RQ)
+	 */
+	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0540 Receive Queue not allocated\n");
+		rc = -ENOMEM;
+		goto out_destroy_fcp_wq;
+	}
+
+	lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
+	lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
+
+	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			    phba->sli4_hba.els_cq, LPFC_USOL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0541 Failed setup of Receive Queue: "
+				"rc = 0x%x\n", rc);
+		goto out_destroy_fcp_wq;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+			"parent cq-id=%d\n",
+			phba->sli4_hba.hdr_rq->queue_id,
+			phba->sli4_hba.dat_rq->queue_id,
+			phba->sli4_hba.els_cq->queue_id);
+	return 0;
+
+out_destroy_fcp_wq:
+	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_fcp_cq:
+	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fp_eq:
+	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+out_error:
+	return rc;
+}
+
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
+ * operation.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+	int fcp_qidx;
+
+	/* Unset mailbox command work queue */
+	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+	/* Unset ELS work queue */
+	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+	/* Unset unsolicited receive queue */
+	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+	/* Unset FCP work queue */
+	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+	/* Unset mailbox command complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+	/* Unset ELS complete queue */
+	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+	/* Unset FCP response complete queue */
+	if (phba->sli4_hba.fcp_cq) {
+		fcp_qidx = 0;
+		do {
+			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
+	}
+	/* Unset fast-path event queue */
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+		     fcp_qidx++)
+			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+	}
+	/* Unset slow-path event queue */
+	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of the completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used by the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ *   - Mailbox asynchronous events
+ *   - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	int i;
+
+	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+		if (!cq_event)
+			goto out_pool_create_fail;
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_cqe_event_pool);
+	}
+	return 0;
+
+out_pool_create_fail:
+	lpfc_sli4_cq_event_pool_destroy(phba);
+	return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event, *next_cq_event;
+
+	list_for_each_entry_safe(cq_event, next_cq_event,
+				 &phba->sli4_hba.sp_cqe_event_pool, list) {
+		list_del(&cq_event->list);
+		kfree(cq_event);
+	}
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event = NULL;
+
+	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+			 struct lpfc_cq_event, list);
+	return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cq_event = __lpfc_sli4_cq_event_alloc(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			     struct lpfc_cq_event *cq_event)
+{
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+			   struct lpfc_cq_event *cq_event)
+{
+	unsigned long iflags;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli4_cq_event_release(phba, cq_event);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine frees all the pending completion-queue events back into the
+ * free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+	LIST_HEAD(cqelist);
+	struct lpfc_cq_event *cqe;
+	unsigned long iflags;
+
+	/* Retrieve all the pending WCQEs from pending WCQE lists */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Pending FCP XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending ELS XRI abort events */
+	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+			 &cqelist);
+	/* Pending async events */
+	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+			 &cqelist);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	while (!list_empty(&cqelist)) {
+		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+		lpfc_sli4_cq_event_release(phba, cqe);
+	}
+}
+
+/**
+ * lpfc_pci_function_reset - Reset pci function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys
+ * all resources assigned to the PCI function that originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t rc = 0, if_type;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_register reg_data;
+	uint16_t devid;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0494 Unable to allocate memory for "
+					"issuing SLI_FUNCTION_RESET mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
+
+		/* Setup PCI function reset mailbox-ioctl command */
+		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+				 LPFC_SLI4_MBX_EMBED);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0495 SLI_FUNCTION_RESET mailbox "
+					"failed with status x%x add_status x%x,"
+					" mbx status x%x\n",
+					shdr_status, shdr_add_status, rc);
+			rc = -ENXIO;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		for (num_resets = 0;
+		     num_resets < MAX_IF_TYPE_2_RESETS;
+		     num_resets++) {
+			reg_data.word0 = 0;
+			bf_set(lpfc_sliport_ctrl_end, &reg_data,
+			       LPFC_SLIPORT_LITTLE_ENDIAN);
+			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
+			       LPFC_SLIPORT_INIT_PORT);
+			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
+			       CTRLregaddr);
+			/* flush */
+			pci_read_config_word(phba->pcidev,
+					     PCI_DEVICE_ID, &devid);
+			/*
+			 * Poll the Port Status Register and wait for RDY for
+			 * up to 10 seconds.  If the port doesn't respond, treat
+			 * it as an error.  If the port responds with RN, start
+			 * the loop again.
+			 */
+			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
+				msleep(10);
+				if (lpfc_readl(phba->sli4_hba.u.if_type2.
+					      STATUSregaddr, &reg_data.word0)) {
+					rc = -ENODEV;
+					goto out;
+				}
+				if (bf_get(lpfc_sliport_status_rn, &reg_data))
+					reset_again++;
+				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+					break;
+			}
+
+			/*
+			 * If the port responds to the init request with
+			 * reset needed, delay for a bit and restart the loop.
+			 */
+			if (reset_again && (rdy_chk < 1000)) {
+				msleep(10);
+				reset_again = 0;
+				continue;
+			}
+
+			/* Detect any port errors. */
+			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
+			    (rdy_chk >= 1000)) {
+				phba->work_status[0] = readl(
+					phba->sli4_hba.u.if_type2.ERR1regaddr);
+				phba->work_status[1] = readl(
+					phba->sli4_hba.u.if_type2.ERR2regaddr);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2890 Port error detected during port "
+					"reset(%d): port status reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					num_resets, reg_data.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+				rc = -ENODEV;
+			}
+
+			/*
+			 * Terminate the outer loop provided the Port indicated
+			 * ready within 10 seconds.
+			 */
+			if (rdy_chk < 1000)
+				break;
+		}
+		/* delay driver action following IF_TYPE_2 function reset */
+		msleep(100);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
+
+out:
+	/* Catch the not-ready port failure after a port reset. */
+	if (num_resets >= MAX_IF_TYPE_2_RESETS)
+		rc = -ENODEV;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send @cnt NOP mailbox commands and wait for
+ * each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
+ **/
+static int
+lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int length, cmdsent;
+	uint32_t mbox_tmo;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (cnt == 0) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2518 Requested to send 0 NOP mailbox cmd\n");
+		return cnt;
+	}
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2519 Unable to allocate memory for issuing "
+				"NOP mailbox command\n");
+		return 0;
+	}
+
+	/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
+	length = (sizeof(struct lpfc_mbx_nop) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
+
+	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+		if (!phba->sli4_hba.intr_enable)
+			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		else {
+			mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+			rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+		}
+		if (rc == MBX_TIMEOUT)
+			break;
+		/* Check return status */
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"2520 NOP mailbox command failed "
+					"status x%x add_status x%x mbx "
+					"status x%x\n", shdr_status,
+					shdr_add_status, rc);
+			break;
+		}
+	}
+
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+
+	return cmdsent;
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+	struct pci_dev *pdev;
+	unsigned long bar0map_len, bar1map_len, bar2map_len;
+	int error = -ENODEV;
+	uint32_t if_type;
+
+	/* Obtain PCI device reference */
+	if (!phba->pcidev)
+		return error;
+	else
+		pdev = phba->pcidev;
+
+	/* Set the device DMA mask size */
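+	/*
+	 * Prefer a 64-bit DMA mask; if the platform cannot satisfy it,
+	 * fall back to a 32-bit mask before giving up.
+	 */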
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+			return error;
+		}
+	}
+
+	/*
+	 * The BARs and register set definitions and offset locations are
+	 * dependent on the if_type.
+	 */
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
+				  &phba->sli4_hba.sli_intf.word0)) {
+		return error;
+	}
+
+	/* There is no SLI3 fallback for SLI4 devices. */
+	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_VALID) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2894 SLI_INTF reg contents invalid "
+				"sli_intf reg 0x%x\n",
+				phba->sli4_hba.sli_intf.word0);
+		return error;
+	}
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	/*
+	 * Get the bus address of the SLI4 device BAR regions and the
+	 * number of bytes required by each mapping. The mapping of the
+	 * particular PCI BAR regions depends on the type of
+	 * SLI4 device.
+	 */
+	if (pci_resource_start(pdev, 0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, 0);
+		bar0map_len = pci_resource_len(pdev, 0);
+
+		/*
+		 * Map SLI4 PCI Config Space Register base to a kernel virtual
+		 * addr
+		 */
+		phba->sli4_hba.conf_regs_memmap_p =
+			ioremap(phba->pci_bar0_map, bar0map_len);
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "ioremap failed for SLI4 PCI config "
+				   "registers.\n");
+			goto out;
+		}
+		/* Set up BAR0 PCI config space register memory map */
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
+	} else {
+		phba->pci_bar0_map = pci_resource_start(pdev, 1);
+		bar0map_len = pci_resource_len(pdev, 1);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			dev_printk(KERN_ERR, &pdev->dev,
+			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
+			goto out;
+		}
+		phba->sli4_hba.conf_regs_memmap_p =
+				ioremap(phba->pci_bar0_map, bar0map_len);
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				"ioremap failed for SLI4 PCI config "
+				"registers.\n");
+			goto out;
+		}
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
+	}
+
+	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+	    (pci_resource_start(pdev, 2))) {
+		/*
+		 * Map SLI4 if type 0 HBA Control Register base to a kernel
+		 * virtual address and setup the registers.
+		 */
+		phba->pci_bar1_map = pci_resource_start(pdev, 2);
+		bar1map_len = pci_resource_len(pdev, 2);
+		phba->sli4_hba.ctrl_regs_memmap_p =
+				ioremap(phba->pci_bar1_map, bar1map_len);
+		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA control registers.\n");
+			goto out_iounmap_conf;
+		}
+		lpfc_sli4_bar1_register_memmap(phba);
+	}
+
+	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
+	    (pci_resource_start(pdev, 4))) {
+		/*
+		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
+		 * virtual address and setup the registers.
+		 */
+		phba->pci_bar2_map = pci_resource_start(pdev, 4);
+		bar2map_len = pci_resource_len(pdev, 4);
+		phba->sli4_hba.drbl_regs_memmap_p =
+				ioremap(phba->pci_bar2_map, bar2map_len);
+		if (!phba->sli4_hba.drbl_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+			   "ioremap failed for SLI4 HBA doorbell registers.\n");
+			goto out_iounmap_ctrl;
+		}
+		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+		if (error)
+			goto out_iounmap_all;
+	}
+
+	return 0;
+
+out_iounmap_all:
+	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+	return error;
+}
+
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+	uint32_t if_type;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		iounmap(phba->sli4_hba.conf_regs_memmap_p);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix() is
+ * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
+ * invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
+ * later, when the device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
+ * will be left with MSI-X enabled, leaking its vectors.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+	int rc, i;
+	LPFC_MBOXQ_t *pmb;
+
+	/* Set up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		phba->msix_entries[i].entry = i;
+
+	/* Configure MSI-X capability structure */
+	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
+				ARRAY_SIZE(phba->msix_entries));
+	if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0420 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0477 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", i,
+				phba->msix_entries[i].vector,
+				phba->msix_entries[i].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+
+	/* vector-0 is associated to slow-path handler */
+	rc = request_irq(phba->msix_entries[0].vector,
+			 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0421 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* vector-1 is associated to fast-path handler */
+	rc = request_irq(phba->msix_entries[1].vector,
+			 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0429 MSI-X fast-path request_irq failed "
+				"(%d)\n", rc);
+		goto irq_fail_out;
+	}
+
+	/*
+	 * Configure HBA MSI-X attention conditions to messages
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb) {
+		rc = -ENOMEM;
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0474 Unable to allocate memory for issuing "
+				"MBOX_CONFIG_MSI command\n");
+		goto mem_fail_out;
+	}
+	rc = lpfc_config_msi(phba, pmb);
+	if (rc)
+		goto mbx_fail_out;
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0351 Config MSI mailbox command failed, "
+				"mbxCmd x%x, mbxStatus x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+		goto mbx_fail_out;
+	}
+
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+
+mbx_fail_out:
+	/* Free memory allocated for mailbox command */
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+	/* free the irq already requested */
+	free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
+/**
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+	int i;
+
+	/* Free up MSI-X multi-message vectors */
+	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+		free_irq(phba->msix_entries[i].vector, phba);
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ */
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+	int rc;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0462 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0471 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0478 MSI request_irq failed (%d)\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
+ * has requested before calling pci_disable_msi(). Failure to do so results
+ * in a BUG_ON(), and the device is left with MSI enabled and its vector
+ * leaked.
+ */
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
+ * spec. Depending on the interrupt mode configured for the driver, the
+ * driver will try to fall back from the configured interrupt mode to an
+ * interrupt mode supported by the platform, kernel, and device, in the
+ * order:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   Interrupt mode (2, 1, 0) - successful
+ *   LPFC_INTR_ERROR - error
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval;
+
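+	/*
+	 * Interrupt mode encoding used below: 2 = MSI-X, 1 = MSI,
+	 * 0 = INTx (legacy line interrupt).
+	 */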
+	if (cfg_mode == 2) {
+		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling
+ * request_irq() individually to register each MSI-X vector with an
+ * interrupt handler, which is done in this function. Note that later,
+ * when the device is unloading, the driver must call free_irq() on every
+ * MSI-X vector it has requested before calling pci_disable_msix(). Failure
+ * to do so results in a BUG_ON(), and the device is left with MSI-X
+ * enabled and its vectors leaked.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+	int vectors, rc, index;
+
+	/* Set up MSI-X multi-message vectors */
+	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+		phba->sli4_hba.msix_entries[index].entry = index;
+
+	/* Configure MSI-X capability structure */
+	vectors = phba->sli4_hba.cfg_eqn;
+enable_msix_vectors:
+	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+			     vectors);
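+	/*
+	 * On this kernel, pci_enable_msix() returns a positive count when it
+	 * cannot grant the full request; retry with the reduced vector count.
+	 */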
+	if (rc > 1) {
+		vectors = rc;
+		goto enable_msix_vectors;
+	} else if (rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0484 PCI enable MSI-X failed (%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* Log MSI-X vector assignment */
+	for (index = 0; index < vectors; index++)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0489 MSI-X entry[%d]: vector=x%x "
+				"message=%d\n", index,
+				phba->sli4_hba.msix_entries[index].vector,
+				phba->sli4_hba.msix_entries[index].entry);
+	/*
+	 * Assign MSI-X vectors to interrupt handlers
+	 */
+	if (vectors > 1)
+		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+				 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
+	else
+		/* All Interrupts need to be handled by one EQ */
+		rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+				 &lpfc_sli4_intr_handler, IRQF_SHARED,
+				 LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0485 MSI-X slow-path request_irq failed "
+				"(%d)\n", rc);
+		goto msi_fail_out;
+	}
+
+	/* The rest of the vector(s) are associated to fast-path handler(s) */
+	for (index = 1; index < vectors; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+				 LPFC_FP_DRIVER_HANDLER_NAME,
+				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"0486 MSI-X fast-path (%d) "
+					"request_irq failed (%d)\n", index, rc);
+			goto cfg_fail_out;
+		}
+	}
+	phba->sli4_hba.msix_vec_nr = vectors;
+
+	return rc;
+
+cfg_fail_out:
+	/* free the irq already requested */
+	for (--index; index >= 1; index--)
+		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* free the irq already requested */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+	/* Unconfigure MSI-X capability structure */
+	pci_disable_msix(phba->pcidev);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+	int index;
+
+	/* Free up MSI-X multi-message vectors */
+	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+	/* Disable MSI-X */
+	pci_disable_msix(phba->pcidev);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+	int rc, index;
+
+	rc = pci_enable_msi(phba->pcidev);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0487 PCI enable MSI mode success.\n");
+	else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0488 PCI enable MSI mode failed (%d)\n", rc);
+		return rc;
+	}
+
+	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+	if (rc) {
+		pci_disable_msi(phba->pcidev);
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0490 MSI request_irq failed (%d)\n", rc);
+		return rc;
+	}
+
+	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
+ * has requested before calling pci_disable_msi(). Failure to do so results
+ * in a BUG_ON(), and the device is left with MSI enabled and its vector
+ * leaked.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+	free_irq(phba->pcidev->irq, phba);
+	pci_disable_msi(phba->pcidev);
+	return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable device interrupt and associate driver's
+ * interrupt handler(s) to interrupt vector(s) to device with SLI-4
+ * interface spec. Depending on the interrupt mode configured for the
+ * driver, the driver will try to fall back from the configured interrupt
+ * mode to an interrupt mode supported by the platform, kernel, and
+ * device, in the order:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ * 	Interrupt mode (2, 1, 0) - successful
+ * 	LPFC_INTR_ERROR - error
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+	uint32_t intr_mode = LPFC_INTR_ERROR;
+	int retval, index;
+
+	if (cfg_mode == 2) {
+		/* Preparation before conf_msi mbox cmd */
+		retval = 0;
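+		/*
+		 * Unlike the SLI-3 path, no CONFIG_PORT mailbox command is
+		 * required here, so retval is simply preset to success.
+		 */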
+		if (!retval) {
+			/* Now, try to enable MSI-X interrupt mode */
+			retval = lpfc_sli4_enable_msix(phba);
+			if (!retval) {
+				/* Indicate initialization to MSI-X mode */
+				phba->intr_type = MSIX;
+				intr_mode = 2;
+			}
+		}
+	}
+
+	/* Fallback to MSI if MSI-X initialization failed */
+	if (cfg_mode >= 1 && phba->intr_type == NONE) {
+		retval = lpfc_sli4_enable_msi(phba);
+		if (!retval) {
+			/* Indicate initialization to MSI mode */
+			phba->intr_type = MSI;
+			intr_mode = 1;
+		}
+	}
+
+	/* Fall back to INTx if both MSI-X and MSI initialization failed */
+	if (phba->intr_type == NONE) {
+		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+		if (!retval) {
+			/* Indicate initialization to INTx mode */
+			phba->intr_type = INTx;
+			intr_mode = 0;
+			for (index = 0; index < phba->cfg_fcp_eq_count;
+			     index++) {
+				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+			}
+		}
+	}
+	return intr_mode;
+}
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate
+ * the driver's interrupt handler(s) from interrupt vector(s) to device
+ * with SLI-4 interface spec. Depending on the interrupt mode, the driver
+ * will release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+	/* Disable the currently initialized interrupt mode */
+	if (phba->intr_type == MSIX)
+		lpfc_sli4_disable_msix(phba);
+	else if (phba->intr_type == MSI)
+		lpfc_sli4_disable_msi(phba);
+	else if (phba->intr_type == INTx)
+		free_irq(phba->pcidev->irq, phba);
+
+	/* Reset interrupt management states */
+	phba->intr_type = NONE;
+	phba->sli.slistat.sli_intr = 0;
+
+	return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to undo the HBA device initialization steps
+ * performed on a device with SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
+	lpfc_stop_hba_timers(phba);
+
+	phba->pport->work_port_events = 0;
+
+	lpfc_sli_hba_down(phba);
+
+	lpfc_sli_brdrestart(phba);
+
+	lpfc_sli_disable_intr(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to undo the HBA device initialization steps
+ * performed on a device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *vport = phba->pport;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(shost->host_lock);
+
+	phba->pport->work_port_events = 0;
+
+	/* Stop the SLI4 device port */
+	lpfc_stop_port(phba);
+
+	lpfc_sli4_disable_intr(phba);
+
+	/* Reset SLI4 HBA FCoE function */
+	lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	return;
+}
+
+/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of the device's outstanding XRI exchanges. It checks the XRI exchange
+ * busy status of outstanding FCP and ELS I/Os every 10ms for up to 10
+ * seconds; after that, it checks every 30 seconds, logs an error message,
+ * and waits indefinitely. Only when all busy XRI exchanges have completed
+ * does the driver unload proceed with issuing the function reset mailbox
+ * command to the CNA and releasing the rest of the driver unload
+ * resources.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+	int wait_time = 0;
+	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	while (!fcp_xri_cmpl || !els_xri_cmpl) {
+		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+			if (!fcp_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2877 FCP XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			if (!els_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2878 ELS XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+		} else {
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+		}
+		fcp_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		els_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+	}
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+	int wait_cnt = 0;
+	LPFC_MBOXQ_t *mboxq;
+	struct pci_dev *pdev = phba->pcidev;
+
+	lpfc_stop_hba_timers(phba);
+	phba->sli4_hba.intr_enable = 0;
+
+	/*
+	 * Gracefully wait out the potential current outstanding asynchronous
+	 * mailbox command.
+	 */
+
+	/* First, block any pending async mailbox command from posted */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, trying to wait it out if we can */
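+	/* Poll in 10ms steps, bounded by LPFC_ACTIVE_MBOX_WAIT_CNT iterations */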
+	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		msleep(10);
+		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+			break;
+	}
+	/* Forcefully release the outstanding mailbox command if timed out */
+	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_lock_irq(&phba->hbalock);
+		mboxq = phba->sli.mbox_active;
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Abort all iocbs associated with the hba */
+	lpfc_sli_hba_iocb_abort(phba);
+
+	/* Wait for completion of device XRI exchange busy */
+	lpfc_sli4_xri_exchange_busy_wait(phba);
+
+	/* Disable PCI subsystem interrupt */
+	lpfc_sli4_disable_intr(phba);
+
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
+	/* Stop kthread signal shall trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+
+	/* Reset SLI4 HBA FCoE function */
+	lpfc_pci_function_reset(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	/* Stop the SLI4 device port */
+	phba->pport->work_port_events = 0;
+}
+
+/**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+
+	rc = 0;
+	mqe = &mboxq->u.mqe;
+
+	/* Read the port's SLI4 Parameters port capabilities */
+	lpfc_pc_sli4_params(mboxq);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	}
+
+	if (unlikely(rc))
+		return 1;
+
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+					     &mqe->un.sli4_params);
+	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+					     &mqe->un.sli4_params);
+	sli4_params->proto_types = mqe->un.sli4_params.word3;
+	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+	return rc;
+}
+
+/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion.  The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+	struct lpfc_mqe *mqe = &mboxq->u.mqe;
+	struct lpfc_pc_sli4_params *sli4_params;
+	uint32_t mbox_tmo;
+	int length;
+	struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+	/*
+	 * By default, the driver assumes the SLI4 port requires RPI
+	 * header postings.  The SLI4_PARAM response will correct this
+	 * assumption.
+	 */
+	phba->sli4_hba.rpi_hdrs_in_use = 1;
+
+	/* Read the port's SLI4 Config Parameters */
+	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+			 length, LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	}
+	if (unlikely(rc))
+		return rc;
+	sli4_params = &phba->sli4_hba.pc_sli4_params;
+	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+					     mbx_sli4_parameters);
+	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+					     mbx_sli4_parameters);
+	if (bf_get(cfg_phwq, mbx_sli4_parameters))
+		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+					    mbx_sli4_parameters);
+	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+					   mbx_sli4_parameters);
+	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
+	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+
+	/* Make sure that sge_supp_len can be handled by the driver */
+	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
+		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at the PCI
+ * device-specific information of the device and driver to see if the
+ * driver can support this kind of device. If the match is successful,
+ * the driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver cannot claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error)
+		goto out_free_phba;
+
+	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-3 specific device PCI memory space */
+	error = lpfc_sli_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1402 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up phase-1 common device driver resources */
+	error = lpfc_setup_driver_resource_phase1(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1403 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Set up SLI-3 specific device driver resources */
+	error = lpfc_sli_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1404 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s3;
+	}
+
+	/* Initialize and populate the iocb list per host */
+	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1405 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s3;
+	}
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1406 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1407 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1476 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+	/* Now, trying to enable interrupt and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
+	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0431 Failed to enable interrupt.\n");
+			error = -ENODEV;
+			goto out_free_sysfs_attr;
+		}
+		/* SLI-3 HBA setup */
+		if (lpfc_sli_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1477 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_remove_device;
+		}
+
+		/* Wait 50ms for the interrupts of previous mailbox commands */
+		msleep(50);
+		/* Check active interrupts on message signaled interrupts */
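+		/*
+		 * The mailbox traffic generated during HBA setup should have
+		 * produced more interrupts than there are MSI-X vectors if
+		 * the selected mode is actually delivering interrupts.
+		 */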
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"0447 Configure interrupt mode (%d) "
+					"failed active interrupt test.\n",
+					intr_mode);
+			/* Disable the current interrupt mode */
+			lpfc_sli_disable_intr(phba);
+			/* Try next level of interrupt mode */
+			cfg_mode = --intr_mode;
+		}
+	}
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+
+	return 0;
+
+out_remove_device:
+	lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba   *phba = vport->phba;
+	int i;
+	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disable all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA.
+	 */
+
+	/* HBA interrupt will be disabled after this call */
+	lpfc_sli_hba_down(phba);
+	/* Stop kthread signal shall trigger work_done one more time */
+	kthread_stop(phba->worker_thread);
+	/* Final cleanup of txcmplq and reset the HBA */
+	lpfc_sli_brdrestart(phba);
+
+	kfree(phba->vpi_bmask);
+	kfree(phba->vpi_ids);
+
+	lpfc_stop_hba_timers(phba);
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_debugfs_terminate(vport);
+
+	/* Disable SR-IOV if enabled */
+	if (phba->cfg_sriov_nr_virtfn)
+		pci_disable_sriov(pdev);
+
+	/* Disable interrupt */
+	lpfc_sli_disable_intr(phba);
+
+	pci_set_drvdata(pdev, NULL);
+	scsi_host_put(shost);
+
+	/*
+	 * Call scsi_free before mem_free since scsi bufs are released to their
+	 * corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_mem_free_all(phba);
+
+	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+			  phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+	/* Free resources associated with SLI2 interface */
+	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+			  phba->slim2p.virt, phba->slim2p.phys);
+
+	/* unmap adapter SLIM and Control Registers */
+	iounmap(phba->ctrl_regs_memmap_p);
+	iounmap(phba->slim_memmap_p);
+
+	lpfc_hba_free(phba);
+
+	pci_release_selected_regions(pdev, bars);
+	pci_disable_device(pdev);
+}
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When
+ * PM invokes this method, it quiesces the device by stopping the
+ * driver's worker thread for the device, turning off the device's
+ * interrupt and DMA, and bringing the device offline. Note that as the
+ * driver implements the
+ * minimum PM requirements to a power-aware driver's PM support for the
+ * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
+ * to the suspend() method call will be treated as SUSPEND and the driver will
+ * fully reinitialize its device during resume() method call, the driver will
+ * set device to PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0473 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli_disable_intr(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) to device with SLI-3 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that as the
+ * driver implements the minimum PM requirements to a power-aware driver's
+ * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call will be treated as SUSPEND and the
+ * driver will fully reinitialize its device during resume() method call,
+ * the device will be set to PCI_D0 directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0452 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/*
+	 * As the new kernel behavior of pci_restore_state() API call clears
+	 * device saved_state flag, need to save the restored state again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	/* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0434 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0430 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2723 PCI channel I/O abort preparing for recovery\n");
+
+	/*
+	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
+	 * and let the SCSI mid-layer to retry them to recover.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2710 PCI channel disable preparing for reset\n");
+
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba);
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Disable interrupt and pci device */
+	lpfc_sli_disable_intr(phba);
+	pci_disable_device(phba->pcidev);
+
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI3 device for PCI slot permanently
+ * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2711 PCI channel permanent disable for failure\n");
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling to
+ * device with SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_sli_prep_dev_for_perm_failure(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0472 Unknown PCI error state: x%x\n", state);
+		lpfc_sli_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to
+ * device with SLI-3 interface spec. This is called after PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold-boot.
+ * During the PCI subsystem error recovery, after driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device and
+ * enable its interrupt, but it will only put the HBA into an offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * As the new kernel behavior of pci_restore_state() API call clears
+	 * device saved_state flag, need to save the restored state again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0427 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Take device offline, it will perform cleanup */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	lpfc_sli_brdrestart(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to device
+ * with SLI-3 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ */
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	/* Bring device online, it will be no-op for non-fatal error resume */
+	lpfc_online(phba);
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (max_xri <= 100)
+			return 10;
+		else if (max_xri <= 256)
+			return 25;
+		else if (max_xri <= 512)
+			return 50;
+		else if (max_xri <= 1024)
+			return 100;
+		else
+			return 150;
+	} else
+		return 0;
+}
+
+/**
+ * lpfc_write_firmware - attempt to write a firmware image to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @fw: pointer to firmware image returned from request_firmware.
+ *
+ * returns the number of bytes written if write is successful.
+ * returns a negative error value if there were errors.
+ * returns 0 if firmware matches currently active firmware on port.
+ **/
+int
+lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
+{
+	char fwrev[FW_REV_STR_SIZE];
+	struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
+	struct list_head dma_buffer_list;
+	int i, rc = 0;
+	struct lpfc_dmabuf *dmabuf, *next;
+	uint32_t offset = 0, temp_offset = 0;
+
+	INIT_LIST_HEAD(&dma_buffer_list);
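+	/* Validate the group object header before attempting the download */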
+	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
+	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
+	     LPFC_FILE_TYPE_GROUP) ||
+	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
+	    (be32_to_cpu(image->size) != fw->size)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3022 Invalid FW image found. "
+				"Magic:%x Type:%x ID:%x\n",
+				be32_to_cpu(image->magic_number),
+				bf_get_be32(lpfc_grp_hdr_file_type, image),
+				bf_get_be32(lpfc_grp_hdr_id, image));
+		return -EINVAL;
+	}
+	lpfc_decode_firmware_rev(phba, fwrev, 1);
+	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3023 Updating Firmware. Current Version:%s "
+				"New Version:%s\n",
+				fwrev, image->revision);
+		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
+			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+					 GFP_KERNEL);
+			if (!dmabuf) {
+				rc = -ENOMEM;
+				goto out;
+			}
+			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+							  SLI4_PAGE_SIZE,
+							  &dmabuf->phys,
+							  GFP_KERNEL);
+			if (!dmabuf->virt) {
+				kfree(dmabuf);
+				rc = -ENOMEM;
+				goto out;
+			}
+			list_add_tail(&dmabuf->list, &dma_buffer_list);
+		}
+		while (offset < fw->size) {
+			temp_offset = offset;
+			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
+				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
+					memcpy(dmabuf->virt,
+					       fw->data + temp_offset,
+					       fw->size - temp_offset);
+					temp_offset = fw->size;
+					break;
+				}
+				memcpy(dmabuf->virt, fw->data + temp_offset,
+				       SLI4_PAGE_SIZE);
+				temp_offset += SLI4_PAGE_SIZE;
+			}
+			rc = lpfc_wr_object(phba, &dma_buffer_list,
+				    (fw->size - offset), &offset);
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3024 Firmware update failed. "
+						"%d\n", rc);
+				goto out;
+			}
+		}
+		rc = offset;
+	}
+out:
+	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
+		list_del(&dmabuf->list);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem to attach a
+ * device with SLI-4 interface spec. When an Emulex HBA with SLI-4
+ * interface spec is presented on the PCI bus, the kernel PCI subsystem
+ * looks at the PCI device-specific information of the device and driver
+ * to see if the driver can support this kind of device. If the match is
+ * successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver cannot claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	struct lpfc_hba   *phba;
+	struct lpfc_vport *vport = NULL;
+	struct Scsi_Host  *shost = NULL;
+	int error;
+	uint32_t cfg_mode, intr_mode;
+	int mcnt;
+	int adjusted_fcp_eq_count;
+	const struct firmware *fw;
+	uint8_t file_name[16];
+
+	/* Allocate memory for HBA structure */
+	phba = lpfc_hba_alloc(pdev);
+	if (!phba)
+		return -ENOMEM;
+
+	/* Perform generic PCI device enabling operation */
+	error = lpfc_enable_pci_dev(phba);
+	if (error)
+		goto out_free_phba;
+
+	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
+	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
+	if (error)
+		goto out_disable_pci_dev;
+
+	/* Set up SLI-4 specific device PCI memory space */
+	error = lpfc_sli4_pci_mem_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1410 Failed to set up pci memory space.\n");
+		goto out_disable_pci_dev;
+	}
+
+	/* Set up phase-1 common device driver resources */
+	error = lpfc_setup_driver_resource_phase1(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1411 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
+	}
+
+	/* Set up SLI-4 Specific device driver resources */
+	error = lpfc_sli4_driver_resource_setup(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1412 Failed to set up driver resource.\n");
+		goto out_unset_pci_mem_s4;
+	}
+
+	/* Initialize and populate the iocb list per host */
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2821 initialize iocb list %d.\n",
+			phba->cfg_iocb_cnt*1024);
+	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
+
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1413 Failed to initialize iocb list.\n");
+		goto out_unset_driver_resource_s4;
+	}
+
+	INIT_LIST_HEAD(&phba->active_rrq_list);
+	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
+
+	/* Set up common device driver resources */
+	error = lpfc_setup_driver_resource_phase2(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1414 Failed to set up driver resource.\n");
+		goto out_free_iocb_list;
+	}
+
+	/* Get the default values for Model Name and Description */
+	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+	/* Create SCSI host to the physical port */
+	error = lpfc_create_shost(phba);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1415 Failed to create scsi host.\n");
+		goto out_unset_driver_resource;
+	}
+
+	/* Configure sysfs attributes */
+	vport = phba->pport;
+	error = lpfc_alloc_sysfs_attr(vport);
+	if (error) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1416 Failed to allocate sysfs attr\n");
+		goto out_destroy_shost;
+	}
+
+	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
+	/* Now, trying to enable interrupt and bring up the device */
+	cfg_mode = phba->cfg_use_msi;
+	while (true) {
+		/* Put device to a known state before enabling interrupt */
+		lpfc_stop_port(phba);
+		/* Configure and enable interrupt */
+		intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+		if (intr_mode == LPFC_INTR_ERROR) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0426 Failed to enable interrupt.\n");
+			error = -ENODEV;
+			goto out_free_sysfs_attr;
+		}
+		/* Default to single EQ for non-MSI-X */
+		if (phba->intr_type != MSIX)
+			adjusted_fcp_eq_count = 0;
+		else if (phba->sli4_hba.msix_vec_nr <
+					phba->cfg_fcp_eq_count + 1)
+			adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+		else
+			adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
+		phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+		/* Set up SLI-4 HBA */
+		if (lpfc_sli4_hba_setup(phba)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1421 Failed to set up hba\n");
+			error = -ENODEV;
+			goto out_disable_intr;
+		}
+
+		/* Send NOP mbx cmds for non-INTx mode active interrupt test */
+		if (intr_mode != 0)
+			mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
+							    LPFC_ACT_INTR_CNT);
+
+		/* Check active interrupts received only for MSI/MSI-X */
+		if (intr_mode == 0 ||
+		    phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
+			/* Log the current active interrupt mode */
+			phba->intr_mode = intr_mode;
+			lpfc_log_intr_mode(phba, intr_mode);
+			break;
+		}
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0451 Configure interrupt mode (%d) "
+				"failed active interrupt test.\n",
+				intr_mode);
+		/* Unset the previous SLI-4 HBA setup. */
+		/*
+		 * TODO:  Is this operation compatible with IF TYPE 2
+		 * devices?  All port state is deleted and cleared.
+		 */
+		lpfc_sli4_unset_hba(phba);
+		/* Try next level of interrupt mode */
+		cfg_mode = --intr_mode;
+	}
+
+	/* Perform post initialization setup */
+	lpfc_post_init_setup(phba);
+
+	/* check for firmware upgrade or downgrade (if_type 2 only) */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		snprintf(file_name, 16, "%s.grp", phba->ModelName);
+		error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+		if (!error) {
+			lpfc_write_firmware(phba, fw);
+			release_firmware(fw);
+		}
+	}
+
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+	return 0;
+
+out_disable_intr:
+	lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+	lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s4:
+	lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+	lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
+	if (shost)
+		scsi_host_put(shost);
+out_free_phba:
+	lpfc_hba_free(phba);
+	return error;
+}
+
+/**
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_vport **vports;
+	struct lpfc_hba *phba = vport->phba;
+	int i;
+
+	/* Mark the device unloading flag */
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Free the HBA sysfs attributes */
+	lpfc_free_sysfs_attr(vport);
+
+	/* Release all the vports against this physical port */
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+			fc_vport_terminate(vports[i]->fc_vport);
+	lpfc_destroy_vport_work_array(phba, vports);
+
+	/* Remove FC host and then SCSI host with the physical port */
+	fc_remove_host(shost);
+	scsi_remove_host(shost);
+
+	/* Perform cleanup on the physical port */
+	lpfc_cleanup(vport);
+
+	/*
+	 * Bring down the SLI Layer. This step disables all interrupts,
+	 * clears the rings, discards all mailbox commands, and resets
+	 * the HBA FCoE function.
+	 */
+	lpfc_debugfs_terminate(vport);
+	lpfc_sli4_hba_unset(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Perform scsi free before driver resource_unset since scsi
+	 * buffers are released to their corresponding pools here.
+	 */
+	lpfc_scsi_free(phba);
+	lpfc_sli4_driver_resource_unset(phba);
+
+	/* Unmap adapter Control and Doorbell registers */
+	lpfc_sli4_pci_mem_unset(phba);
+
+	/* Release PCI resources and disable device's PCI function */
+	scsi_host_put(shost);
+	lpfc_disable_pci_dev(phba);
+
+	/* Finally, free the driver's device data structure */
+	lpfc_hba_free(phba);
+
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM
+ * invokes this method, it quiesces the device by stopping the driver's worker
+ * thread for the device, turning off the device's interrupt and DMA, and
+ * bringing the device offline. Note that the driver implements only the
+ * minimum PM requirements for a power-aware driver's suspend/resume support:
+ * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
+ * method call are treated as SUSPEND and the driver fully reinitializes its
+ * device during the resume() method call. Therefore the driver sets the
+ * device to the PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"2843 PCI device Power Management suspend.\n");
+
+	/* Bring down the device */
+	lpfc_offline_prep(phba);
+	lpfc_offline(phba);
+	kthread_stop(phba->worker_thread);
+
+	/* Disable interrupt from device */
+	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
+
+	/* Save device state to PCI config space */
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) for a device with SLI-4 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state, fully
+ * reinitializes the device, and brings it online. Note that the driver
+ * implements only the minimum PM requirements for a power-aware driver's
+ * suspend/resume support: all possible PM messages (SUSPEND, HIBERNATE,
+ * FREEZE) to the suspend() method call are treated as SUSPEND and the driver
+ * fully reinitializes its device during the resume() method call. Therefore
+ * the device is set to the PCI_D0 state directly in PCI config space before
+ * restoring the state.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	uint32_t intr_mode;
+	int error;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0292 PCI device Power Management resume.\n");
+
+	/* Restore device state from PCI config space */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/*
+	 * Because the new kernel behavior of the pci_restore_state() API call
+	 * clears the device's saved_state flag, the restored state must be
+	 * saved again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	 /* Startup the kernel thread for this host adapter. */
+	phba->worker_thread = kthread_run(lpfc_do_work, phba,
+					"lpfc_worker_%d", phba->brd_no);
+	if (IS_ERR(phba->worker_thread)) {
+		error = PTR_ERR(phba->worker_thread);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0293 PM resume failed to start worker "
+				"thread: error=x%x.\n", error);
+		return error;
+	}
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0294 PM resume Failed to enable interrupt\n");
+		return -EIO;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Restart HBA and bring it online */
+	lpfc_sli_brdrestart(phba);
+	lpfc_online(phba);
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot recover. It
+ * aborts all the outstanding SCSI I/Os to the pci device.
+ **/
+static void
+lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2828 PCI channel I/O abort preparing for recovery\n");
+	/*
+	 * There may be errored I/Os pending in the HBA; abort all I/Os on the
+	 * txcmplq and let the SCSI mid-layer retry them to recover.
+	 */
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for PCI slot reset. It
+ * disables the device interrupt and pci device, and aborts the internal FCP
+ * pending I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2826 PCI channel disable preparing for reset\n");
+
+	/* Block any management I/Os to the device */
+	lpfc_block_mgmt_io(phba);
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Disable interrupt and pci device */
+	lpfc_sli4_disable_intr(phba);
+	lpfc_sli4_queue_destroy(phba);
+	pci_disable_device(phba->pcidev);
+
+	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to prepare the SLI4 device for the PCI slot being
+ * permanently disabled. It blocks the SCSI transport layer traffic and
+ * flushes the pending FCP I/Os.
+ **/
+static void
+lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
+{
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2827 PCI channel permanent disable for failure\n");
+
+	/* Block all SCSI devices' I/Os on the host */
+	lpfc_scsi_dev_block(phba);
+
+	/* stop all timers */
+	lpfc_stop_hba_timers(phba);
+
+	/* Clean up all driver's outstanding SCSI I/Os */
+	lpfc_sli_flush_fcp_rings(phba);
+}
+
+/**
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for error handling to a
+ * device with SLI-4 interface spec. This function is called by the PCI subsystem
+ * after a PCI bus error affecting this device has been detected. When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (state) {
+	case pci_channel_io_normal:
+		/* Non-fatal error, prepare for recovery */
+		lpfc_sli4_prep_dev_for_recover(phba);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		/* Fatal error, prepare for slot reset */
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		/* Permanent failure, prepare for device down */
+		lpfc_sli4_prep_dev_for_perm_failure(phba);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		/* Unknown state, prepare and request slot reset */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2825 Unknown PCI error state: x%x\n", state);
+		lpfc_sli4_prep_dev_for_reset(phba);
+		return PCI_ERS_RESULT_NEED_RESET;
+	}
+}
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling to a
+ * device with SLI-4 interface spec. It is called after the PCI bus has been
+ * reset to restart the PCI card from scratch, as if from a cold boot. During
+ * the PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function initializes the HBA device and enables
+ * the interrupt, but it just puts the HBA into an offline state without
+ * passing any I/O traffic.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t intr_mode;
+
+	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+	if (pci_enable_device_mem(pdev)) {
+		printk(KERN_ERR "lpfc: Cannot re-enable "
+			"PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * Because the new kernel behavior of the pci_restore_state() API call
+	 * clears the device's saved_state flag, the restored state must be
+	 * saved again.
+	 */
+	pci_save_state(pdev);
+
+	if (pdev->is_busmaster)
+		pci_set_master(pdev);
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Configure and enable interrupt */
+	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+	if (intr_mode == LPFC_INTR_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2824 Cannot re-enable interrupt after "
+				"slot reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	} else
+		phba->intr_mode = intr_mode;
+
+	/* Log the current active interrupt mode */
+	lpfc_log_intr_mode(phba, phba->intr_mode);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling to a
+ * device with SLI-4 interface spec. It is called when kernel error recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ **/
+static void
+lpfc_io_resume_s4(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	/*
+	 * In case of slot reset, since the function reset is performed
+	 * through a mailbox command that needs DMA to be enabled, this
+	 * operation has to be done in the I/O resume phase. Taking the
+	 * device offline will perform the necessary cleanup.
+	 */
+	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
+		/* Perform device reset */
+		lpfc_offline_prep(phba);
+		lpfc_offline(phba);
+		lpfc_sli_brdrestart(phba);
+		/* Bring the device back online */
+		lpfc_online(phba);
+	}
+
+	/* Clean up Advanced Error Reporting (AER) if needed */
+	if (phba->hba_flag & HBA_AER_ENABLED)
+		pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
+ * looks at the PCI device-specific information of the device and driver to
+ * see if the driver can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to handle the HBA device
+ * properly.
+ *
+ * Return code
+ * 	0 - driver can claim the device
+ * 	negative value - driver can not claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+	int rc;
+	struct lpfc_sli_intf intf;
+
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
+		return -ENODEV;
+
+	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
+		rc = lpfc_pci_probe_one_s4(pdev, pid);
+	else
+		rc = lpfc_pci_probe_one_s3(pdev, pid);
+
+	return rc;
+}
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_pci_remove_one_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_pci_remove_one_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1424 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ * 	0 - driver suspended the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_suspend_one_s3(pdev, msg);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_suspend_one_s4(pdev, msg);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1425 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ * 	0 - driver resumed the device
+ * 	Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	int rc = -ENODEV;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_pci_resume_one_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_pci_resume_one_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1426 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_error_detected_s3(pdev, state);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_error_detected_s4(pdev, state);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1427 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		rc = lpfc_io_slot_reset_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		rc = lpfc_io_slot_reset_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1428 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
+ * @pdev: pointer to PCI device
+ *
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	switch (phba->pci_dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		lpfc_io_resume_s3(pdev);
+		break;
+	case LPFC_PCI_DEV_OC:
+		lpfc_io_resume_s4(pdev);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1429 Invalid PCI device group: 0x%x\n",
+				phba->pci_dev_grp);
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine puts a reference count on the lpfc module whenever the
+ * character device is opened.
+ **/
+static int
+lpfc_mgmt_open(struct inode *inode, struct file *filep)
+{
+	try_module_get(THIS_MODULE);
+	return 0;
+}
+
+/**
+ * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
+ * @inode: pointer to the inode representing the lpfcmgmt device
+ * @filep: pointer to the file representing the open lpfcmgmt device
+ *
+ * This routine removes a reference count from the lpfc module when the
+ * character device is closed.
+ **/
+static int
+lpfc_mgmt_release(struct inode *inode, struct file *filep)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static struct pci_device_id lpfc_id_table[] = {
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
+
+static struct pci_error_handlers lpfc_err_handler = {
+	.error_detected = lpfc_io_error_detected,
+	.slot_reset = lpfc_io_slot_reset,
+	.resume = lpfc_io_resume,
+};
+
+static struct pci_driver lpfc_driver = {
+	.name		= LPFC_DRIVER_NAME,
+	.id_table	= lpfc_id_table,
+	.probe		= lpfc_pci_probe_one,
+	.remove		= __devexit_p(lpfc_pci_remove_one),
+	.suspend        = lpfc_pci_suspend_one,
+	.resume		= lpfc_pci_resume_one,
+	.err_handler    = &lpfc_err_handler,
+};
+
+static const struct file_operations lpfc_mgmt_fop = {
+	.open = lpfc_mgmt_open,
+	.release = lpfc_mgmt_release,
+};
+
+static struct miscdevice lpfc_mgmt_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "lpfcmgmt",
+	.fops = &lpfc_mgmt_fop,
+};
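+
+/*
+ * Illustrative note, not part of the original source: registering the
+ * miscdevice above creates a /dev/lpfcmgmt character node. A management
+ * application that opens it pins the module via lpfc_mgmt_open(), e.g.
+ * from userspace:
+ *
+ *	int fd = open("/dev/lpfcmgmt", O_RDWR);
+ */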
+
+/**
+ * lpfc_init - lpfc module initialization routine
+ *
+ * This routine is to be invoked when the lpfc module is loaded into the
+ * kernel. The special kernel macro module_init() is used to indicate the
+ * role of this routine to the kernel as lpfc module entry point.
+ *
+ * Return codes
+ *   0 - successful
+ *   -ENOMEM - FC attach transport failed
+ *   all others - failed
+ */
+static int __init
+lpfc_init(void)
+{
+	int error = 0;
+
+	printk(LPFC_MODULE_DESC "\n");
+	printk(LPFC_COPYRIGHT "\n");
+
+	error = misc_register(&lpfc_mgmt_dev);
+	if (error)
+		printk(KERN_ERR "Could not register lpfcmgmt device, "
+			"misc_register returned with status %d", error);
+
+	if (lpfc_enable_npiv) {
+		lpfc_transport_functions.vport_create = lpfc_vport_create;
+		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
+	}
+	lpfc_transport_template =
+				fc_attach_transport(&lpfc_transport_functions);
+	if (lpfc_transport_template == NULL)
+		return -ENOMEM;
+	if (lpfc_enable_npiv) {
+		lpfc_vport_transport_template =
+			fc_attach_transport(&lpfc_vport_transport_functions);
+		if (lpfc_vport_transport_template == NULL) {
+			fc_release_transport(lpfc_transport_template);
+			return -ENOMEM;
+		}
+	}
+	error = pci_register_driver(&lpfc_driver);
+	if (error) {
+		fc_release_transport(lpfc_transport_template);
+		if (lpfc_enable_npiv)
+			fc_release_transport(lpfc_vport_transport_template);
+	}
+
+	return error;
+}
+
+/**
+ * lpfc_exit - lpfc module removal routine
+ *
+ * This routine is invoked when the lpfc module is removed from the kernel.
+ * The special kernel macro module_exit() is used to indicate the role of
+ * this routine to the kernel as lpfc module exit point.
+ */
+static void __exit
+lpfc_exit(void)
+{
+	misc_deregister(&lpfc_mgmt_dev);
+	pci_unregister_driver(&lpfc_driver);
+	fc_release_transport(lpfc_transport_template);
+	if (lpfc_enable_npiv)
+		fc_release_transport(lpfc_vport_transport_template);
+	if (_dump_buf_data) {
+		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
+				"_dump_buf_data at 0x%p\n",
+				(1L << _dump_buf_data_order), _dump_buf_data);
+		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
+	}
+
+	if (_dump_buf_dif) {
+		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
+				"_dump_buf_dif at 0x%p\n",
+				(1L << _dump_buf_dif_order), _dump_buf_dif);
+		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
+	}
+}
+
+module_init(lpfc_init);
+module_exit(lpfc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
+MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_logmsg.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_logmsg.h
new file mode 100644
index 0000000..baf53e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -0,0 +1,58 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LOG_ELS		0x00000001	/* ELS events */
+#define LOG_DISCOVERY	0x00000002	/* Link discovery events */
+#define LOG_MBOX	0x00000004	/* Mailbox events */
+#define LOG_INIT	0x00000008	/* Initialization events */
+#define LOG_LINK_EVENT	0x00000010	/* Link events */
+#define LOG_IP		0x00000020	/* IP traffic history */
+#define LOG_FCP		0x00000040	/* FCP traffic history */
+#define LOG_NODE	0x00000080	/* Node table events */
+#define LOG_TEMP	0x00000100	/* Temperature sensor events */
+#define LOG_BG		0x00000200	/* BlockGuard events */
+#define LOG_MISC	0x00000400	/* Miscellaneous events */
+#define LOG_SLI		0x00000800	/* SLI events */
+#define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
+#define LOG_LIBDFC	0x00002000	/* Libdfc events */
+#define LOG_VPORT	0x00004000	/* NPIV events */
+#define LOG_SECURITY	0x00008000	/* Security events */
+#define LOG_EVENT	0x00010000	/* CT,TEMP,DUMP, logging */
+#define LOG_FIP		0x00020000	/* FIP events */
+#define LOG_FCP_UNDER	0x00040000	/* FCP underruns errors */
+#define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
+
+#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
+do { \
+	{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
+		dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+			   fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
+} while (0)
+
+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
+do { \
+	{ uint32_t log_verbose = (phba)->pport ? \
+				 (phba)->pport->cfg_log_verbose : \
+				 (phba)->cfg_log_verbose; \
+	  if (((mask) & log_verbose) || (level[1] <= '3')) \
+		dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+			   fmt, phba->brd_no, ##arg); \
+	} \
+} while (0)
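+
+/*
+ * Illustrative sketch, not part of the original source: a typical call site
+ * passes one of the LOG_* masks above together with a kernel log level, e.g.
+ *
+ *	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ *			"example message, brd_no is prepended\n");
+ *
+ * The message is emitted either when the mask is set in the configured
+ * cfg_log_verbose value or when the level is KERN_ERR or more severe
+ * (level[1] <= '3').
+ */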
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mbox.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mbox.c
new file mode 100644
index 0000000..20336f0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mbox.c
@@ -0,0 +1,2344 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_compat.h"
+
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping the list of static
+ * vports to be created.
+ **/
+int
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		uint16_t offset)
+{
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+
+	mb = &pmb->u.mb;
+
+	/* Setup to dump vport info region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = DMP_REGION_VPORT;
+	mb->mbxOwner = OWN_HOST;
+
+	/* For SLI3 HBAs data is embedded in mailbox */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		mb->un.varDmp.cv = 1;
+		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+		return 0;
+	}
+
+	/* For SLI4 HBAs the driver needs to allocate memory */
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+			"2605 lpfc_dump_static_vport: memory"
+			" allocation failed\n");
+		return 1;
+	}
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+	/* save address for completion */
+	pmb->context2 = (uint8_t *) mp;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
+
+	return 0;
+}
+
+/**
+ * lpfc_down_link - Bring down the HBA's link.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares a mailbox command to bring down the HBA's link.
+ **/
+void
+lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb = &pmb->u.mb;
+	mb->mbxCommand = MBX_DOWN_LINK;
+	mb->mbxOwner = OWN_HOST;
+}
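+
+/*
+ * Illustrative sketch, not part of the original source: callers typically
+ * allocate a mailbox element from the mailbox memory pool, have one of the
+ * lpfc_* prepare routines in this file fill it in, and then issue it (see
+ * lpfc_sli4_unreg_all_rpis below for a real instance of this pattern), e.g.
+ *
+ *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *
+ *	if (mbox) {
+ *		lpfc_down_link(phba, mbox);
+ *		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ *		    MBX_NOT_FINISHED)
+ *			mempool_free(mbox, phba->mbox_mem_pool);
+ *	}
+ */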
+
+/**
+ * lpfc_dump_mem - Prepare a mailbox command for reading a region.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset into the region.
+ * @region_id: config region id.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping HBA's config region.
+ **/
+void
+lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
+		uint16_t region_id)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	ctx = pmb->context2;
+
+	/* Setup to dump VPD region */
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = offset;
+	mb->un.varDmp.region_id = region_id;
+	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This function creates a dump memory mailbox command to dump wake-up
+ * parameters.
+ **/
+void
+lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	void *ctx;
+
+	mb = &pmb->u.mb;
+	/* Save context so that we can restore after memset */
+	ctx = pmb->context2;
+
+	/* Setup to dump VPD region */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->mbxOwner = OWN_HOST;
+	mb->un.varDmp.cv = 1;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.entry_index = 0;
+	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
+	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
+	mb->un.varDmp.co = 0;
+	mb->un.varDmp.resp_offset = 0;
+	pmb->context2 = ctx;
+	return;
+}
+
+/**
+ * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read NVRAM mailbox command returns the HBA's non-volatile parameters
+ * that are used as defaults when the Fibre Channel link is brought on-line.
+ *
+ * This routine prepares the mailbox command for reading information stored
+ * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
+ **/
+void
+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_READ_NV;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_async - Prepare a mailbox command for enabling HBA async event
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @ring: ring number for the asynchronous event to be configured.
+ *
+ * The asynchronous event enable mailbox command is used to enable the
+ * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
+ * specifies the default ring to which events are posted.
+ *
+ * This routine prepares the mailbox command for enabling HBA asynchronous
+ * event support on an IOCB ring.
+ **/
+void
+lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
+		uint32_t ring)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
+	mb->un.varCfgAsyncEvent.ring = ring;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_heart_beat - Prepare a mailbox command for heart beat
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The heart beat mailbox command is used to detect an unresponsive HBA, which
+ * is defined as any device where no error attention is sent and both mailbox
+ * and rings are not processed.
+ *
+ * This routine prepares the mailbox command for issuing a heart beat in the
+ * form of mailbox command to the HBA. The timely completion of the heart
+ * beat mailbox command indicates the health of the HBA.
+ **/
+void
+lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_HEARTBEAT;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @mp: DMA buffer memory for reading the link attention information into.
+ *
+ * The read topology mailbox command is issued to read the link topology
+ * information indicated by the HBA port when the Link Event bit of the Host
+ * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
+ * Attention ACQE is received from the port (For SLI-4). A Link Event
+ * Attention occurs based on an exception detected at the Fibre Channel link
+ * interface.
+ *
+ * This routine prepares the mailbox command for reading HBA link topology
+ * information. A DMA memory has been set aside and address passed to the
+ * HBA through @mp for the HBA to DMA link attention information into the
+ * memory as part of the execution of the mailbox command.
+ *
+ * Return codes
+ *    0 - Success (currently always returns 0)
+ **/
+int
+lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		   struct lpfc_dmabuf *mp)
+{
+	MAILBOX_t *mb;
+	struct lpfc_sli *psli;
+
+	psli = &phba->sli;
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	INIT_LIST_HEAD(&mp->list);
+	mb->mbxCommand = MBX_READ_TOPOLOGY;
+	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
+	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
+
+	/* Save address for later completion and set the owner to host so that
+	 * the FW knows this mailbox is available for processing.
+	 */
+	pmb->context1 = (uint8_t *)mp;
+	mb->mbxOwner = OWN_HOST;
+	return (0);
+}
+
+/**
+ * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The clear link attention mailbox command is issued to clear the link event
+ * attention condition indicated by the Link Event bit of the Host Attention
+ * (HSTATT) register. The link event attention condition is cleared only if
+ * the event tag specified matches that of the current link event counter.
+ * The current event tag is read using the read link attention event mailbox
+ * command.
+ *
+ * This routine prepares the mailbox command for clearing HBA link attention
+ * information.
+ **/
+void
+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varClearLA.eventTag = phba->fc_eventTag;
+	mb->mbxCommand = MBX_CLEAR_LA;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure link mailbox command is used before the initialize link
+ * mailbox command to override default values and to configure link-oriented
+ * parameters such as DID address and various timers. Typically, this
+ * command would be used after an F_Port login to set the returned DID address
+ * and the fabric timeout values. This command is not valid before a configure
+ * port command has configured the HBA port.
+ *
+ * This routine prepares the mailbox command for configuring link on a HBA.
+ **/
+void
+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	struct lpfc_vport  *vport = phba->pport;
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	/* NEW_FEATURE
+	 * SLI-2, Coalescing Response Feature.
+	 */
+	if (phba->cfg_cr_delay) {
+		mb->un.varCfgLnk.cr = 1;
+		mb->un.varCfgLnk.ci = 1;
+		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
+		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
+	}
+
+	mb->un.varCfgLnk.myId = vport->fc_myDID;
+	mb->un.varCfgLnk.edtov = phba->fc_edtov;
+	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
+	mb->un.varCfgLnk.ratov = phba->fc_ratov;
+	mb->un.varCfgLnk.rttov = phba->fc_rttov;
+	mb->un.varCfgLnk.altov = phba->fc_altov;
+	mb->un.varCfgLnk.crtov = phba->fc_crtov;
+	mb->un.varCfgLnk.citov = phba->fc_citov;
+
+	if (phba->cfg_ack0)
+		mb->un.varCfgLnk.ack0_enable = 1;
+
+	mb->mbxCommand = MBX_CONFIG_LINK;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_config_msi - Prepare a mailbox command for configuring msi-x
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
+ * MSI-X multi-message interrupt vector association to interrupt attention
+ * conditions.
+ *
+ * Return codes
+ *    0 - Success
+ *    -EINVAL - Failure
+ **/
+int
+lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t attentionConditions[2];
+
+	/* Sanity check */
+	if (phba->cfg_use_msi != 2) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0475 Not configured for supporting MSI-X "
+				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
+		return -EINVAL;
+	}
+
+	if (phba->sli_rev < 3) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0476 HBA not supporting SLI-3 or later "
+				"SLI Revision: 0x%x\n", phba->sli_rev);
+		return -EINVAL;
+	}
+
+	/* Clear mailbox command fields */
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+	/*
+	 * SLI-3, Message Signaled Interrupt Feature.
+	 */
+
+	/* Multi-message attention configuration */
+	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
+				  HA_LATT | HA_MBATT);
+	attentionConditions[1] = 0;
+
+	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
+	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
+
+	/*
+	 * Set up message number to HA bit association
+	 */
+#ifdef __BIG_ENDIAN_BITFIELD
+	/* RA0 (FCP Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
+	/* RA1 (Other Protocol Extra Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
+#else   /*  __LITTLE_ENDIAN_BITFIELD */
+	/* RA0 (FCP Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
+	/* RA1 (Other Protocol Extra Ring) */
+	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
+#endif
+	/* Multi-message interrupt autoclear configuration*/
+	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
+	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
+
+	/* For now, HBA autoclear does not work reliably, disable it */
+	mb->un.varCfgMSI.autoClearHA[0] = 0;
+	mb->un.varCfgMSI.autoClearHA[1] = 0;
+
+	/* Set command and owner bit */
+	mb->mbxCommand = MBX_CONFIG_MSI;
+	mb->mbxOwner = OWN_HOST;
+
+	return 0;
+}
+
+/**
+ * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @topology: the link topology for the link to be initialized to.
+ * @linkspeed: the link speed for the link to be initialized to.
+ *
+ * The initialize link mailbox command is used to initialize the Fibre
+ * Channel link. This command must follow a configure port command that
+ * establishes the mode of operation.
+ *
+ * This routine prepares the mailbox command for initializing link on a HBA
+ * with the specified link topology and speed.
+ **/
+void
+lpfc_init_link(struct lpfc_hba * phba,
+	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
+{
+	lpfc_vpd_t *vpd;
+	struct lpfc_sli *psli;
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	psli = &phba->sli;
+	switch (topology) {
+	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+		break;
+	case FLAGS_TOPOLOGY_MODE_PT_PT:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		break;
+	case FLAGS_TOPOLOGY_MODE_LOOP:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
+		break;
+	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
+		break;
+	case FLAGS_LOCAL_LB:
+		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+		break;
+	}
+
+	/* Enable asynchronous ABTS responses from firmware */
+	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+
+	/* NEW_FEATURE
+	 * Setting up the link speed
+	 */
+	vpd = &phba->vpd;
+	if (vpd->rev.feaLevelHigh >= 0x02){
+		switch(linkspeed){
+		case LPFC_USER_LINK_SPEED_1G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
+			break;
+		case LPFC_USER_LINK_SPEED_2G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
+			break;
+		case LPFC_USER_LINK_SPEED_4G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
+			break;
+		case LPFC_USER_LINK_SPEED_8G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
+			break;
+		case LPFC_USER_LINK_SPEED_10G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
+			break;
+		case LPFC_USER_LINK_SPEED_16G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
+			break;
+		case LPFC_USER_LINK_SPEED_AUTO:
+		default:
+			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+			break;
+		}
+
+	}
+	else
+		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
+
+	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
+	mb->mbxOwner = OWN_HOST;
+	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
+	return;
+}
+
+/**
+ * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @vpi: virtual N_Port identifier.
+ *
+ * The read service parameter mailbox command is used to read the HBA port
+ * service parameters. The service parameters are read into the buffer
+ * specified directly by a BDE in the mailbox command. These service
+ * parameters may then be used to build the payload of an N_Port/F_Port
+ * login request and reply (PLOGI/ACC).
+ *
+ * This routine prepares the mailbox command for reading HBA port service
+ * parameters. The DMA memory is allocated in this function and the addresses
+ * are populated into the mailbox command for the HBA to DMA the service
+ * parameters into.
+ *
+ * Return codes
+ *    0 - Success
+ *    1 - DMA memory allocation failed
+ **/
+int
+lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
+{
+	struct lpfc_dmabuf *mp;
+	MAILBOX_t *mb;
+	struct lpfc_sli *psli;
+
+	psli = &phba->sli;
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxOwner = OWN_HOST;
+
+	/* Get a buffer to hold the HBAs Service Parameters */
+
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		mb->mbxCommand = MBX_READ_SPARM64;
+		/* READ_SPARAM: no buffers */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			        "0301 READ_SPARAM: no buffers\n");
+		return (1);
+	}
+	INIT_LIST_HEAD(&mp->list);
+	mb->mbxCommand = MBX_READ_SPARM64;
+	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
+
+	/* save address for completion */
+	pmb->context1 = mp;
+
+	return (0);
+}
+
+/**
+ * lpfc_unreg_did - Prepare a mailbox command for unregistering DID
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregister DID mailbox command is used to unregister an N_Port/F_Port
+ * login for an unknown RPI by specifying the DID of a remote port. This
+ * command frees an RPI context in the HBA port. This has the effect of
+ * performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering a remote
+ * N_Port/F_Port (DID) login.
+ **/
+void
+lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
+	       LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varUnregDID.did = did;
+	mb->un.varUnregDID.vpi = vpi;
+	if ((vpi != 0xffff) &&
+	    (phba->sli_rev == LPFC_SLI_REV4))
+		mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_D_ID;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_config - Prepare a mailbox command for reading HBA configuration
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read configuration mailbox command is used to read the HBA port
+ * configuration parameters. This mailbox command provides a method for
+ * seeing any parameters that may have changed via various configuration
+ * mailbox commands.
+ *
+ * This routine prepares the mailbox command for reading out HBA configuration
+ * parameters.
+ **/
+void
+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxCommand = MBX_READ_CONFIG;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read link status mailbox command is used to read the link status from
+ * the HBA. Link status includes all link-related error counters. These
+ * counters are maintained by the HBA and originated in the link hardware
+ * unit. Note that all of these counters wrap.
+ *
+ * This routine prepares the mailbox command for reading out HBA link status.
+ **/
+void
+lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->mbxCommand = MBX_READ_LNK_STAT;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @param: pointer to memory holding the server parameters.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @rpi: the rpi to use in the registration (usually only used for SLI4).
+ *
+ * The registration login mailbox command is used to register an N_Port or
+ * F_Port login. This registration allows the HBA to cache the remote N_Port
+ * service parameters internally and thereby make the appropriate FC-2
+ * decisions. The remote port service parameters are handed off by the driver
+ * to the HBA using a descriptor entry that directly identifies a buffer in
+ * host memory. In exchange, the HBA returns an RPI identifier.
+ *
+ * This routine prepares the mailbox command for registering remote port login.
+ * The function allocates DMA buffer for passing the service parameters to the
+ * HBA with the mailbox command.
+ *
+ * Return codes
+ *    0 - Success
+ *    1 - DMA memory allocation failed
+ **/
+int
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint8_t *sparam;
+	struct lpfc_dmabuf *mp;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varRegLogin.rpi = 0;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
+	mb->un.varRegLogin.did = did;
+	mb->mbxOwner = OWN_HOST;
+	/* Get a buffer to hold NPorts Service Parameters */
+	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		mb->mbxCommand = MBX_REG_LOGIN64;
+		/* REG_LOGIN: no buffers */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
+				"rpi x%x\n", vpi, did, rpi);
+		return 1;
+	}
+	INIT_LIST_HEAD(&mp->list);
+	sparam = mp->virt;
+
+	/* Copy param's into a new buffer */
+	memcpy(sparam, param, sizeof (struct serv_parm));
+
+	/* save address for completion */
+	pmb->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_REG_LOGIN64;
+	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
+	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
+
+	return 0;
+}
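+
+/* Illustrative sketch (assumption, not part of the driver): the DMA buffer
+ * saved in pmb->context1 by lpfc_reg_rpi() is expected to be released by the
+ * completion handler, along the lines shown below.
+ */
+#if 0
+static void lpfc_example_reg_rpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->context1;
+
+	if (mp) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+#endif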
+
+/**
+ * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @rpi: remote port identifier
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration login mailbox command is used to unregister an N_Port
+ * or F_Port login. This command frees an RPI context in the HBA. It has the
+ * effect of performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering remote port
+ * login.
+ *
+ * For SLI4 ports, the rpi passed to this function must be the physical
+ * rpi value, not the logical index.
+ **/
+void
+lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
+		 LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb;
+
+	mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varUnregLogin.rpi = rpi;
+	mb->un.varUnregLogin.rsvd1 = 0;
+	if (phba->sli_rev >= LPFC_SLI_REV3)
+		mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_LOGIN;
+	mb->mbxOwner = OWN_HOST;
+
+	return;
+}
+
+/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		/*
+		 * For SLI4 functions, the rpi field is overloaded for
+		 * the vport context unreg all.  This routine passes
+		 * 0 for the rpi field in lpfc_unreg_login for compatibility
+		 * with SLI3 and then overrides the rpi field with the
+		 * expected value for SLI4.
+		 */
+		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
+				 mbox);
+		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(mbox, phba->mbox_mem_pool);
+	}
+}
+
+/**
+ * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
+ * @vport: pointer to the virtual N_Port (vport) data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The registration vport identifier mailbox command is used to activate a
+ * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
+ * N_Port_ID against the information in the selected virtual N_Port context
+ * block and marks it active to allow normal processing of IOCB commands and
+ * received unsolicited exchanges.
+ *
+ * This routine prepares the mailbox command for registering a virtual N_Port.
+ **/
+void
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_hba *phba = vport->phba;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	/*
+	 * Set the re-reg VPI bit for f/w to update the MAC address.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
+		mb->un.varRegVpi.upd = 1;
+
+	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
+	mb->un.varRegVpi.sid = vport->fc_myDID;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
+	else
+		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
+	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
+	       sizeof(struct lpfc_name));
+	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
+	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
+
+	mb->mbxCommand = MBX_REG_VPI;
+	mb->mbxOwner = OWN_HOST;
+	return;
+
+}
+
+/**
+ * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration vport identifier mailbox command is used to inactivate
+ * a virtual N_Port. The driver must have logged out and unregistered all
+ * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
+ * unregister any default RPIs associated with the specified vpi, aborting
+ * any active exchanges. The HBA will post the mailbox response after making
+ * the virtual N_Port inactive.
+ *
+ * This routine prepares the mailbox command for unregistering a virtual
+ * N_Port.
+ **/
+void
+lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
+	else if (phba->sli_rev >= LPFC_SLI_REV4)
+		mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
+
+	mb->mbxCommand = MBX_UNREG_VPI;
+	mb->mbxOwner = OWN_HOST;
+	return;
+
+}
+
+/**
+ * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets up and initializes the IOCB rings in the Port Control
+ * Block (PCB).
+ **/
+static void
+lpfc_config_pcb_setup(struct lpfc_hba * phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	PCB_t *pcbp = phba->pcb;
+	dma_addr_t pdma_addr;
+	uint32_t offset;
+	uint32_t iocbCnt = 0;
+	int i;
+
+	pcbp->maxRing = (psli->num_rings - 1);
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+
+		pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+							SLI2_IOCB_CMD_SIZE;
+		pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+							SLI2_IOCB_RSP_SIZE;
+		/* A ring MUST have both cmd and rsp entries defined to be
+		   valid */
+		if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+			pcbp->rdsc[i].cmdEntries = 0;
+			pcbp->rdsc[i].rspEntries = 0;
+			pcbp->rdsc[i].cmdAddrHigh = 0;
+			pcbp->rdsc[i].rspAddrHigh = 0;
+			pcbp->rdsc[i].cmdAddrLow = 0;
+			pcbp->rdsc[i].rspAddrLow = 0;
+			pring->cmdringaddr = NULL;
+			pring->rspringaddr = NULL;
+			continue;
+		}
+		/* Command ring setup for ring */
+		pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+		pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+
+		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
+			 (uint8_t *) phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
+		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
+		iocbCnt += pring->numCiocb;
+
+		/* Response ring setup for ring */
+		pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+
+		pcbp->rdsc[i].rspEntries = pring->numRiocb;
+		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
+			 (uint8_t *)phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
+		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
+		iocbCnt += pring->numRiocb;
+	}
+}
+
+/**
+ * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read revision mailbox command is used to read the revision levels of
+ * the HBA components. These components include hardware units, resident
+ * firmware, and available firmware. HBAs that support SLI-3 mode of
+ * operation provide different response information depending on the version
+ * requested by the driver.
+ *
+ * This routine prepares the mailbox command for reading HBA revision
+ * information.
+ **/
+void
+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	mb->un.varRdRev.cv = 1;
+	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
+	mb->mbxCommand = MBX_READ_REV;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
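+/**
+ * lpfc_sli4_swap_str - Byte swap the READ_REV firmware name strings
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * On SLI4 ports the firmware name strings returned by a READ_REV mailbox
+ * command are copied in place through lpfc_sli_pcimem_bcopy() so that they
+ * are presented to the host in the expected byte order.
+ **/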
+void
+lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_mqe *mqe;
+
+	switch (mb->mbxCommand) {
+	case  MBX_READ_REV:
+		mqe = &pmb->u.mqe;
+		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
+				 mqe->un.read_rev.fw_name, 16);
+		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
+				 mqe->un.read_rev.ulp_fw_name, 16);
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/**
+ * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
+ * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test using the fields in the Selection Profile 2
+ * extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile2.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile2.seqlenoff  = hbq_desc->seqlenoff;
+}
+
+/**
+ * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
+ * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test and Byte Field Test using the fields in the
+ * Selection Profile 3 extension in words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile3.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
+	hbqmb->profiles.profile3.seqlenoff  = hbq_desc->seqlenoff;
+	memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
+	       sizeof(hbqmb->profiles.profile3.cmdmatch));
+}
+
+/**
+ * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
+ * HBA tests the initial frame of an incoming sequence using the frame's
+ * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
+ * and Byte Field Test using the fields in the Selection Profile 5 extension
+ * words 20:31.
+ **/
+static void
+lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
+			struct lpfc_hbq_init  *hbq_desc)
+{
+	hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
+	hbqmb->profiles.profile5.maxlen     = hbq_desc->maxlen;
+	hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
+	hbqmb->profiles.profile5.seqlenoff  = hbq_desc->seqlenoff;
+	memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
+	       sizeof(hbqmb->profiles.profile5.cmdmatch));
+}
+
+/**
+ * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
+ * @phba: pointer to lpfc hba data structure.
+ * @id: HBQ identifier.
+ * @hbq_desc: pointer to the HBA descriptor data structure.
+ * @hbq_entry_index: index of the HBQ entry data structures.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
+ * an HBQ. The configuration binds events that require buffers to a particular
+ * ring and HBQ based on a selection profile.
+ *
+ * This routine prepares the mailbox command for configuring an HBQ.
+ **/
+void
+lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
+		 struct lpfc_hbq_init *hbq_desc,
+		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
+{
+	int i;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+	hbqmb->hbqId = id;
+	hbqmb->entry_count = hbq_desc->entry_count;   /* # entries in HBQ */
+	hbqmb->recvNotify = hbq_desc->rn;             /* Receive
+						       * Notification */
+	hbqmb->numMask    = hbq_desc->mask_count;     /* # R_CTL/TYPE masks
+						       * # in words 0-19 */
+	hbqmb->profile    = hbq_desc->profile;	      /* Selection profile:
+						       * 0 = all,
+						       * 7 = logentry */
+	hbqmb->ringMask   = hbq_desc->ring_mask;      /* Binds HBQ to a ring
+						       * e.g. Ring0=b0001,
+						       * ring2=b0100 */
+	hbqmb->headerLen  = hbq_desc->headerLen;      /* 0 if not profile 4
+						       * or 5 */
+	hbqmb->logEntry   = hbq_desc->logEntry;       /* Set to 1 if this
+						       * HBQ will be used
+						       * for LogEntry
+						       * buffers */
+	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
+		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
+	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
+
+	mb->mbxCommand = MBX_CONFIG_HBQ;
+	mb->mbxOwner = OWN_HOST;
+
+				/* Copy info for profiles 2, 3 and 5. For
+				 * other profiles this area is reserved.
+				 */
+	if (hbq_desc->profile == 2)
+		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
+	else if (hbq_desc->profile == 3)
+		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
+	else if (hbq_desc->profile == 5)
+		lpfc_build_hbq_profile5(hbqmb, hbq_desc);
+
+	/* Return if no rctl / type masks for this HBQ */
+	if (!hbq_desc->mask_count)
+		return;
+
+	/* Otherwise we setup specific rctl / type masks for this HBQ */
+	for (i = 0; i < hbq_desc->mask_count; i++) {
+		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
+		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
+		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
+		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
+ * @phba: pointer to lpfc hba data structure.
+ * @ring: ring number to configure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure ring mailbox command is used to configure an IOCB ring. This
+ * configuration binds from one to six of the HBA's R_CTL/TYPE mask entries to
+ * the ring. This is used to map incoming sequences to a particular ring whose
+ * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
+ * attempt to configure a ring whose number is greater than the number
+ * specified in the Port Control Block (PCB). It is an error to issue the
+ * configure ring command more than once with the same ring number. The HBA
+ * returns an error if the driver attempts this.
+ *
+ * This routine prepares the mailbox command for configuring IOCB ring.
+ **/
+void
+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
+{
+	int i;
+	MAILBOX_t *mb = &pmb->u.mb;
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+
+	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+	mb->un.varCfgRing.ring = ring;
+	mb->un.varCfgRing.maxOrigXchg = 0;
+	mb->un.varCfgRing.maxRespXchg = 0;
+	mb->un.varCfgRing.recvNotify = 1;
+
+	psli = &phba->sli;
+	pring = &psli->ring[ring];
+	mb->un.varCfgRing.numMask = pring->num_mask;
+	mb->mbxCommand = MBX_CONFIG_RING;
+	mb->mbxOwner = OWN_HOST;
+
+	/* Is this ring configured for a specific profile */
+	if (pring->prt[0].profile) {
+		mb->un.varCfgRing.profile = pring->prt[0].profile;
+		return;
+	}
+
+	/* Otherwise we setup specific rctl / type masks for this ring */
+	for (i = 0; i < pring->num_mask; i++) {
+		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
+		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
+			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
+		else
+			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
+		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
+		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
+	}
+
+	return;
+}
+
+/**
+ * lpfc_config_port - Prepare a mailbox command for configuring port
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure port mailbox command is used to identify the Port Control
+ * Block (PCB) in the driver memory. After this command is issued, the
+ * driver must not access the mailbox in the HBA without first resetting
+ * the HBA. The HBA may copy the PCB information to internal storage for
+ * subsequent use; the driver can not change the PCB information unless it
+ * resets the HBA.
+ *
+ * This routine prepares the mailbox command for configuring port.
+ **/
+void
+lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
+	MAILBOX_t *mb = &pmb->u.mb;
+	dma_addr_t pdma_addr;
+	uint32_t bar_low, bar_high;
+	size_t offset;
+	struct lpfc_hgp hgp;
+	int i;
+	uint32_t pgp_offset;
+
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_CONFIG_PORT;
+	mb->mbxOwner = OWN_HOST;
+
+	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
+
+	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
+	pdma_addr = phba->slim2p.phys + offset;
+	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
+	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+
+	/* The Host Group Pointer is always in SLIM */
+	mb->un.varCfgPort.hps = 1;
+
+	/* If the HBA supports SLI-3, ask for it */
+
+	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
+		if (phba->cfg_enable_bg)
+			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+		if (phba->cfg_enable_dss)
+			mb->un.varCfgPort.cdss = 1; /* Configure Security */
+		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
+		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
+		if (phba->max_vpi && phba->cfg_enable_npiv &&
+		    phba->vpd.sli3Feat.cmv) {
+			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
+			mb->un.varCfgPort.cmv = 1;
+		} else
+			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
+	} else
+		phba->sli_rev = LPFC_SLI_REV2;
+	mb->un.varCfgPort.sli_mode = phba->sli_rev;
+
+	/* If this is an SLI3 port, configure async status notification. */
+	if (phba->sli_rev == LPFC_SLI_REV3)
+		mb->un.varCfgPort.casabt = 1;
+
+	/* Now setup pcb */
+	phba->pcb->type = TYPE_NATIVE_SLI2;
+	phba->pcb->feature = FEATURE_INITIAL_SLI2;
+
+	/* Setup Mailbox pointers */
+	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
+	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+	pdma_addr = phba->slim2p.phys + offset;
+	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
+
+	/*
+	 * Setup Host Group ring pointer.
+	 *
+	 * For efficiency reasons, the ring get/put pointers can be
+	 * placed in adapter memory (SLIM) rather than in host memory.
+	 * This allows firmware to avoid PCI reads/writes when updating
+	 * and checking pointers.
+	 *
+	 * The firmware recognizes the use of SLIM memory by comparing
+	 * the address of the get/put pointers structure with that of
+	 * the SLIM BAR (BAR0).
+	 *
+	 * Caution: be sure to use the PCI config space value of BAR0/BAR1
+	 * (the hardware's view of the base address), not the OS's
+	 * value of pci_resource_start() as the OS value may be a cookie
+	 * for ioremap/iomap.
+	 */
+
+
+	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
+	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+
+	/*
+	 * Set up HGP - Port Memory
+	 *
+	 * The port expects the host get/put pointers to reside in memory
+	 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
+	 * area of SLIM.  In SLI-2 mode, there's an additional 16 reserved
+	 * words (0x40 bytes).  This area is not reserved if HBQs are
+	 * configured in SLI-3.
+	 *
+	 * CR0Put    - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
+	 * RR0Get                      0xc4              0x84
+	 * CR1Put                      0xc8              0x88
+	 * RR1Get                      0xcc              0x8c
+	 * CR2Put                      0xd0              0x90
+	 * RR2Get                      0xd4              0x94
+	 * CR3Put                      0xd8              0x98
+	 * RR3Get                      0xdc              0x9c
+	 *
+	 * Reserved                    0xa0-0xbf
+	 *    If HBQs configured:
+	 *                         HBQ 0 Put ptr  0xc0
+	 *                         HBQ 1 Put ptr  0xc4
+	 *                         HBQ 2 Put ptr  0xc8
+	 *                         ......
+	 *                         HBQ(M-1)Put Pointer 0xc0+(M-1)*4
+	 *
+	 */
+
+	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
+		phba->host_gp = &phba->mbox->us.s2.host[0];
+		phba->hbq_put = NULL;
+		offset = (uint8_t *)&phba->mbox->us.s2.host -
+			(uint8_t *)phba->slim2p.virt;
+		pdma_addr = phba->slim2p.phys + offset;
+		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
+		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
+	} else {
+		/* The Host Group Pointer is always in SLIM */
+		mb->un.varCfgPort.hps = 1;
+
+		if (phba->sli_rev == 3) {
+			phba->host_gp = &mb_slim->us.s3.host[0];
+			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+		} else {
+			phba->host_gp = &mb_slim->us.s2.host[0];
+			phba->hbq_put = NULL;
+		}
+
+		/* mask off BAR0's flag bits 0 - 3 */
+		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+			(void __iomem *)phba->host_gp -
+			(void __iomem *)phba->MBslimaddr;
+		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
+			phba->pcb->hgpAddrHigh = bar_high;
+		else
+			phba->pcb->hgpAddrHigh = 0;
+		/* write HGP data to SLIM at the required longword offset */
+		memset(&hgp, 0, sizeof(struct lpfc_hgp));
+
+		for (i = 0; i < phba->sli.num_rings; i++) {
+			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+				    sizeof(*phba->host_gp));
+		}
+	}
+
+	/* Setup Port Group offset */
+	if (phba->sli_rev == 3)
+		pgp_offset = offsetof(struct lpfc_sli2_slim,
+				      mbx.us.s3_pgp.port);
+	else
+		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
+	pdma_addr = phba->slim2p.phys + pgp_offset;
+	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
+	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
+
+	/* Use callback routine to set up rings in the pcb */
+	lpfc_config_pcb_setup(phba);
+
+	/* special handling for LC HBAs */
+	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
+		uint32_t hbainit[5];
+
+		lpfc_hba_init(phba, hbainit);
+
+		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
+	}
+
+	/* Swap PCB if needed */
+	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
+}
+
+/**
+ * lpfc_kill_board - Prepare a mailbox command for killing board
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The kill board mailbox command is used to tell firmware to perform a
+ * graceful shutdown of a channel on a specified board to prepare for reset.
+ * When the kill board mailbox command is received, the ER3 bit is set to 1
+ * in the Host Status register and the ER Attention bit is set to 1 in the
+ * Host Attention register of the HBA function that received the kill board
+ * command.
+ *
+ * This routine prepares the mailbox command for killing the board in
+ * preparation for a graceful shutdown.
+ **/
+void
+lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+	MAILBOX_t *mb = &pmb->u.mb;
+
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb->mbxCommand = MBX_KILL_BOARD;
+	mb->mbxOwner = OWN_HOST;
+	return;
+}
+
+/**
+ * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put into the mailbox
+ * command queue so that commands are processed in order, since the HBA can
+ * process only one mailbox command at a time.
+ **/
+void
+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+	struct lpfc_sli *psli;
+
+	psli = &phba->sli;
+
+	list_add_tail(&mbq->list, &psli->mboxq);
+
+	psli->mboxq_cnt++;
+
+	return;
+}
+
+/**
+ * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put into the mailbox
+ * command queue so that commands are processed in order, since the HBA can
+ * process only one mailbox command at a time. After the HBA finishes
+ * processing a mailbox command, the driver removes the next pending mailbox
+ * command from the head of the queue and sends it to the HBA for processing.
+ *
+ * Return codes
+ *    pointer to the driver internal queue element for mailbox command.
+ **/
+LPFC_MBOXQ_t *
+lpfc_mbox_get(struct lpfc_hba * phba)
+{
+	LPFC_MBOXQ_t *mbq = NULL;
+	struct lpfc_sli *psli = &phba->sli;
+
+	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
+	if (mbq)
+		psli->mboxq_cnt--;
+
+	return mbq;
+}
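+
+/* Illustrative sketch (not part of the driver): lpfc_mbox_put() and
+ * lpfc_mbox_get() are expected to be called with phba->hbalock held; a
+ * minimal enqueue/dequeue pairing might look like this.
+ */
+#if 0
+static void lpfc_example_mbox_queue(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	LPFC_MBOXQ_t *next;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	lpfc_mbox_put(phba, mbq);	/* queue at the tail */
+	next = lpfc_mbox_get(phba);	/* pull the oldest pending command */
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	/* 'next' would then be handed to the HBA for processing */
+}
+#endif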
+
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
+/**
+ * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+	unsigned long iflag;
+
+	/* This function expects to be called from interrupt context */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	__lpfc_mbox_cmpl_put(phba, mbq);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine checks whether a mailbox command is valid to be issued. The
+ * check is performed by the mailbox issue API before a client's mailbox
+ * command is posted to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	/* Mailbox command that have a completion handler must also have a
+	 * vport specified.
+	 */
+	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if (!mboxq->vport) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+					"1814 Mbox x%x failed, no vport\n",
+					mboxq->u.mb.mbxCommand);
+			dump_stack();
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to check whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API before posting
+ * a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+	/* If the PCI channel is in offline state, do not issue mbox */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -ENODEV;
+
+	/* If the HBA is in error state, do not issue mbox */
+	if (phba->link_state == LPFC_HBA_ERROR)
+		return -ENODEV;
+
+	return 0;
+}
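+
+/* Illustrative sketch (not part of the driver): the two checks above are
+ * typically applied together before a mailbox command is posted; a hedged
+ * pre-issue helper might look like this.
+ */
+#if 0
+static int lpfc_example_pre_issue_check(struct lpfc_hba *phba,
+					LPFC_MBOXQ_t *mboxq)
+{
+	int rc;
+
+	rc = lpfc_mbox_dev_check(phba);		/* is the device usable? */
+	if (rc)
+		return rc;
+	return lpfc_mbox_cmd_check(phba, mboxq);	/* is the command sane? */
+}
+#endif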
+
+/**
+ * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine retrieves the proper timeout value according to the mailbox
+ * command code.
+ *
+ * Return codes
+ *    Timeout value to be used for the given mailbox command
+ **/
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	MAILBOX_t *mbox = &mboxq->u.mb;
+	uint8_t subsys, opcode;
+
+	switch (mbox->mbxCommand) {
+	case MBX_WRITE_NV:	/* 0x03 */
+	case MBX_UPDATE_CFG:	/* 0x1B */
+	case MBX_DOWN_LOAD:	/* 0x1C */
+	case MBX_DEL_LD_ENTRY:	/* 0x1D */
+	case MBX_LOAD_AREA:	/* 0x81 */
+	case MBX_WRITE_WWN:     /* 0x98 */
+	case MBX_LOAD_EXP_ROM:	/* 0x9C */
+		return LPFC_MBOX_TMO_FLASH_CMD;
+	case MBX_SLI4_CONFIG:	/* 0x9b */
+		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
+		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
+		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
+			switch (opcode) {
+			case LPFC_MBOX_OPCODE_READ_OBJECT:
+			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
+			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
+			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
+			case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
+			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
+			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
+			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+			}
+		}
+		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
+			switch (opcode) {
+			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
+				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
+			}
+		}
+		return LPFC_MBOX_SLI4_CONFIG_TMO;
+	}
+	return LPFC_MBOX_TMO;
+}
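+
+/* Illustrative sketch (not part of the driver): the value returned above is
+ * in seconds; polling paths typically convert it into a jiffies deadline,
+ * along these lines.
+ */
+#if 0
+static unsigned long lpfc_example_mbox_deadline(struct lpfc_hba *phba,
+						LPFC_MBOXQ_t *mboxq)
+{
+	/* deadline in jiffies for a polled mailbox completion */
+	return msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) * 1000) +
+	       jiffies;
+}
+#endif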
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      dma_addr_t phyaddr, uint32_t length)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+	nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @sge: pointer to the sge structure that receives the entry.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+		      struct lpfc_mbx_sge *sge)
+{
+	struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+				&mbox->u.mqe.un.nembed_cmd;
+	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+	sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees an SLI4-specific mailbox command used for sending an
+ * IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	struct lpfc_mbx_sge sge;
+	dma_addr_t phyaddr;
+	uint32_t sgecount, sgentry;
+
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, just free the mbox command */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+
+	/* For non-embedded mbox command, we need to free the pages first */
+	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+	/* There is nothing we can do if there is no sge address array */
+	if (unlikely(!mbox->sge_array)) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return;
+	}
+	/* Each non-embedded DMA memory was allocated in the length of a page */
+	for (sgentry = 0; sgentry < sgecount; sgentry++) {
+		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  mbox->sge_array->addr[sgentry], phyaddr);
+	}
+	/* Free the sge address array memory */
+	kfree(mbox->sge_array);
+	/* Finally, free the mailbox command itself */
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the  SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
+ * @emb: LPFC_SLI4_MBX_EMBED for an embedded command, LPFC_SLI4_MBX_NEMBED otherwise.
+ *
+ * This routine sets up the header fields of SLI4 specific mailbox command
+ * for sending IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ *         for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+	struct lpfc_mbx_sli4_config *sli4_config;
+	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+	uint32_t alloc_len;
+	uint32_t resid_len;
+	uint32_t pagen, pcount;
+	void *viraddr;
+	dma_addr_t phyaddr;
+
+	/* Set up SLI4 mailbox command header fields */
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+	/* Set up SLI4 ioctl command header fields */
+	sli4_config = &mbox->u.mqe.un.sli4_config;
+
+	/* Setup for the embedded mbox command */
+	if (emb) {
+		/* Set up main header fields */
+		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+		sli4_config->header.cfg_mhdr.payload_length = length;
+		/* Set up sub-header fields following main header */
+		bf_set(lpfc_mbox_hdr_opcode,
+			&sli4_config->header.cfg_shdr.request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem,
+			&sli4_config->header.cfg_shdr.request, subsystem);
+		sli4_config->header.cfg_shdr.request.request_length =
+			length - LPFC_MBX_CMD_HDR_LENGTH;
+		return length;
+	}
+
+	/* Setup for the non-embedded mbox command */
+	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
+	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+	/* Allocate record for keeping SGE virtual addresses */
+	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+				  GFP_KERNEL);
+	if (!mbox->sge_array) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2527 Failed to allocate non-embedded SGE "
+				"array.\n");
+		return 0;
+	}
+	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+		/* The DMA memory is always allocated in the length of a
+		 * page, even though the last SGE might not fill up to a
+		 * page; this gives a known size of SLI4_PAGE_SIZE for the
+		 * later DMA memory free.
+		 */
+		viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
+					     &phyaddr, GFP_KERNEL);
+		/* If the allocation fails, proceed with whatever we have */
+		if (!viraddr)
+			break;
+		memset(viraddr, 0, SLI4_PAGE_SIZE);
+		mbox->sge_array->addr[pagen] = viraddr;
+		/* Keep the first page for later sub-header construction */
+		if (pagen == 0)
+			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+		resid_len = length - alloc_len;
+		if (resid_len > SLI4_PAGE_SIZE) {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      SLI4_PAGE_SIZE);
+			alloc_len += SLI4_PAGE_SIZE;
+		} else {
+			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+					      resid_len);
+			alloc_len = length;
+		}
+	}
+
+	/* Set up main header fields in mailbox command */
+	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+	/* Set up sub-header fields into the first page */
+	if (pagen > 0) {
+		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+		cfg_shdr->request.request_length =
+				alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
+	}
+	/* The sub-header is in DMA memory, which needs endian conversion */
+	if (cfg_shdr)
+		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+				      sizeof(union  lpfc_sli4_cfg_shdr));
+	return alloc_len;
+}
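+
+/* Illustrative sketch (assumption, not part of the driver): a typical
+ * embedded SLI4_CONFIG round trip built with lpfc_sli4_config() and released
+ * with lpfc_sli4_mbox_cmd_free().  The opcode used here is only an example.
+ */
+#if 0
+static int lpfc_example_sli4_config(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	uint32_t len;
+	int rc;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	len = sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+	      sizeof(struct lpfc_sli4_cfg_mhdr);
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, len,
+			 LPFC_SLI4_MBX_EMBED);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return (rc == MBX_SUCCESS) ? 0 : -EIO;
+}
+#endif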
+
+/**
+ * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to an allocated lpfc mbox resource.
+ * @exts_count: the number of extents, if required, to allocate.
+ * @rsrc_type: the resource extent type.
+ * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
+ *
+ * This routine completes the subcommand header for SLI4 resource extent
+ * mailbox commands.  It is called after lpfc_sli4_config.  The caller must
+ * pass an allocated mailbox and the attributes required to initialize the
+ * mailbox correctly.
+ *
+ * Return: the actual length of the mbox command allocated.
+ **/
+int
+lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
+{
+	uint8_t opcode = 0;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
+	void *virtaddr = NULL;
+
+	/* Set up SLI4 ioctl command header fields */
+	if (emb == LPFC_SLI4_MBX_NEMBED) {
+		/* Get the first SGE entry from the non-embedded DMA memory */
+		virtaddr = mbox->sge_array->addr[0];
+		if (virtaddr == NULL)
+			return 1;
+		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+	}
+
+	/*
+	 * The resource type is common to all extent Opcodes and resides in the
+	 * same position.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED)
+		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+		       rsrc_type);
+	else {
+		/* This is DMA data.  Byteswap is required. */
+		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
+		       n_rsrc_extnt, rsrc_type);
+		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
+				      &n_rsrc_extnt->word4,
+				      sizeof(uint32_t));
+	}
+
+	/* Complete the initialization for the particular Opcode. */
+	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
+	switch (opcode) {
+	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
+		if (emb == LPFC_SLI4_MBX_EMBED)
+			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
+			       exts_count);
+		else
+			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
+			       n_rsrc_extnt, exts_count);
+		break;
+	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
+	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
+	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
+		/* Initialization is complete.*/
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2929 Resource Extent Opcode x%x is "
+				"unsupported\n", opcode);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
+ * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
+ * be returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return LPFC_MBOX_SUBSYSTEM_NA;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get subsystem from embedded sub-header */
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get subsystem from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return LPFC_MBOX_SUBSYSTEM_NA;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command queue entry.
+ *
+ * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
+ * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
+ * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be
+ * returned.
+ **/
+uint8_t
+lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_sli4_config *sli4_cfg;
+	union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+		return LPFC_MBOX_OPCODE_NA;
+	sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+	/* For embedded mbox command, get opcode from embedded sub-header*/
+	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+	}
+
+	/* For non-embedded mbox command, get opcode from first dma page */
+	if (unlikely(!mbox->sge_array))
+		return LPFC_MBOX_OPCODE_NA;
+	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ * @fcf_index: index to fcf table.
+ *
+ * This routine allocates and constructs a non-embedded mailbox command for
+ * reading an FCF table entry referred to by @fcf_index.
+ *
+ * Return: 0 if the mailbox command is constructed successfully, -ENOMEM
+ * otherwise.
+ **/
+int
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+			   struct lpfcMboxq *mboxq,
+			   uint16_t fcf_index)
+{
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	uint8_t *bytep;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+	if (!mboxq)
+		return -ENOMEM;
+
+	req_len = sizeof(struct fcf_record) +
+		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+			LPFC_SLI4_MBX_NEMBED);
+
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"0291 Allocated DMA memory size (x%x) is "
+				"less than the requested DMA memory "
+				"size (x%x)\n", alloc_len, req_len);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory. This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	virt_addr = mboxq->sge_array->addr[0];
+	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+	/* Set up command fields */
+	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+	/* Perform necessary endian conversion */
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+
+	return 0;
+}
+
+/**
+ * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+	/* Set up SLI4 mailbox command header fields */
+	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+	/* Set up host requested features. */
+	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+	bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable DIF (block guard) only if configured to do so. */
+	if (phba->cfg_enable_bg)
+		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	/* Enable NPIV only if configured to do so. */
+	if (phba->max_vpi && phba->cfg_enable_npiv)
+		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+	return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to setup a VFI
+ * before issuing a FLOGI to login to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	struct lpfc_mbx_init_vfi *init_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mbox->vport = vport;
+	init_vfi = &mbox->u.mqe.un.init_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vfi, init_vfi,
+	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_init_vfi_vpi, init_vfi,
+	       vport->phba->vpi_ids[vport->vpi]);
+	bf_set(lpfc_init_vfi_fcfi, init_vfi,
+	       vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, using @phys as the DMA address of the buffer that
+ * carries the vport's fc service parameters to the HBA for this VFI.
+ * REG_VFI configures virtual
+ * fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+	struct lpfc_mbx_reg_vfi *reg_vfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_vfi = &mbox->u.mqe.un.reg_vfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
+	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
+	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
+	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
+	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+	reg_vfi->e_d_tov = vport->phba->fc_edtov;
+	reg_vfi->r_a_tov = vport->phba->fc_ratov;
+	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+	reg_vfi->bde.addrLow = putPaddrLow(phys);
+	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+			"3134 Register VFI, mydid:x%x, fcfi:%d, "
+			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
+			vport->fc_myDID,
+			vport->phba->fcf.fcfi,
+			vport->phba->sli4_hba.vfi_ids[vport->vfi],
+			vport->phba->vpi_ids[vport->vpi],
+			reg_vfi->wwn[0], reg_vfi->wwn[1]);
+}
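+
+/* Illustrative sketch (assumption, not part of the driver): lpfc_reg_vfi()
+ * expects @phys to point at a DMA buffer holding the vport's service
+ * parameters; a minimal way to prepare such a buffer is shown below.
+ */
+#if 0
+static int lpfc_example_prep_reg_vfi(struct lpfc_vport *vport,
+				     struct lpfcMboxq *mboxq)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_dmabuf *dmabuf;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+	dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
+	}
+	memcpy(dmabuf->virt, &vport->fc_sparam, sizeof(struct serv_parm));
+	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+	mboxq->context1 = dmabuf;	/* freed by the completion handler */
+	return 0;
+}
+#endif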
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @phba: pointer to the hba structure to init the VPI for.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N_Port.  The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual N_Port login.
+ **/
+void
+lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
+	       phba->vpi_ids[vpi]);
+	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
+	       phba->sli4_hba.vfi_ids[phba->pport->vfi]);
+}
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
+	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
+}
+
+/**
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
+ * @phba: pointer to the hba data structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates a SLI4 DUMP mailbox command to dump config
+ * region 23.
+ **/
+int
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_dmabuf *mp = NULL;
+	MAILBOX_t *mb;
+
+	memset(mbox, 0, sizeof(*mbox));
+	mb = &mbox->u.mb;
+
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		/* dump config region 23 failed to allocate memory */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+			"2569 lpfc dump config region 23: memory"
+			" allocation failed\n");
+		return 1;
+	}
+
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+
+	/* save address for completion */
+	mbox->context1 = (uint8_t *) mp;
+
+	mb->mbxCommand = MBX_DUMP_MEMORY;
+	mb->un.varDmp.type = DMP_NV_PARAMS;
+	mb->un.varDmp.region_id = DMP_REGION_23;
+	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	return 0;
+}
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also used
+ * to indicate where received unsolicited frames from this FCF will be sent. By
+ * default this routine will set up the FCF to forward all unsolicited frames
+ * to the RQ ID passed in @phba. This can be overridden by the caller for
+ * more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
+	       phba->fcf.current_rec.fcf_indx);
+	/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+	bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
+	if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
+		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
+		       phba->fcf.current_rec.vlan_id);
+	}
+}
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+	memset(mbox, 0, sizeof(*mbox));
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+	bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = ndlp->phba;
+	struct lpfc_mbx_resume_rpi *resume_rpi;
+
+	memset(mbox, 0, sizeof(*mbox));
+	resume_rpi = &mbox->u.mqe.un.resume_rpi;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+	bf_set(lpfc_resume_rpi_index, resume_rpi,
+	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+	bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
+	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
+}
+
+/**
+ * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+ *                        mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES supported pages mailbox command is issued to
+ * retrieve the particular feature pages supported by the port.
+ **/
+void
+lpfc_supported_pages(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_supp_pages *supp_pages;
+
+	memset(mbox, 0, sizeof(*mbox));
+	supp_pages = &mbox->u.mqe.un.supp_pages;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+}
+
+/**
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+ * retrieve the particular SLI4 features supported by the port.
+ **/
+void
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
+{
+	struct lpfc_mbx_pc_sli4_params *sli4_params;
+
+	memset(mbox, 0, sizeof(*mbox));
+	sli4_params = &mbox->u.mqe.un.sli4_params;
+	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mem.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mem.c
new file mode 100644
index 0000000..ade763d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_mem.c
@@ -0,0 +1,529 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/scsi.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+#define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */
+#define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */
+
+
+/**
+ * lpfc_mem_alloc - create and allocate all PCI and memory pools
+ * @phba: HBA to allocate pools for
+ *
+ * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
+ * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
+ * allocation fails, frees all successfully allocated memory before returning.
+ *
+ * Returns:
+ *   0 on success
+ *   -ENOMEM on failure (if any memory allocations fail)
+ **/
+int
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	int i;
+
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev,
+				phba->cfg_sg_dma_buf_size,
+				phba->cfg_sg_dma_buf_size,
+				0);
+	else
+		phba->lpfc_scsi_dma_buf_pool =
+			pci_pool_create("lpfc_scsi_dma_buf_pool",
+				phba->pcidev, phba->cfg_sg_dma_buf_size,
+				align, 0);
+	if (!phba->lpfc_scsi_dma_buf_pool)
+		goto fail;
+
+	phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
+							LPFC_BPL_SIZE,
+							align, 0);
+	if (!phba->lpfc_mbuf_pool)
+		goto fail_free_dma_buf_pool;
+
+	pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
+					 LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
+	if (!pool->elements)
+		goto fail_free_lpfc_mbuf_pool;
+
+	pool->max_count = 0;
+	pool->current_count = 0;
+	for (i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
+		pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
+				       GFP_KERNEL, &pool->elements[i].phys);
+		if (!pool->elements[i].virt)
+			goto fail_free_mbuf_pool;
+		pool->max_count++;
+		pool->current_count++;
+	}
+
+	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+							 sizeof(LPFC_MBOXQ_t));
+	if (!phba->mbox_mem_pool)
+		goto fail_free_mbuf_pool;
+
+	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_nodelist));
+	if (!phba->nlp_mem_pool)
+		goto fail_free_mbox_pool;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->rrq_pool =
+			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_node_rrq));
+		if (!phba->rrq_pool)
+			goto fail_free_nlp_mem_pool;
+		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+					      phba->pcidev,
+					      LPFC_HDR_BUF_SIZE, align, 0);
+		if (!phba->lpfc_hrb_pool)
+			goto fail_free_rrq_mem_pool;
+
+		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+					      phba->pcidev,
+					      LPFC_DATA_BUF_SIZE, align, 0);
+		if (!phba->lpfc_drb_pool)
+			goto fail_free_hrb_pool;
+		phba->lpfc_hbq_pool = NULL;
+	} else {
+		phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
+			phba->pcidev, LPFC_BPL_SIZE, align, 0);
+		if (!phba->lpfc_hbq_pool)
+			goto fail_free_nlp_mem_pool;
+		phba->lpfc_hrb_pool = NULL;
+		phba->lpfc_drb_pool = NULL;
+	}
+
+	return 0;
+ fail_free_hrb_pool:
+	pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+ fail_free_rrq_mem_pool:
+	mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
+ fail_free_nlp_mem_pool:
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+ fail_free_mbox_pool:
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+ fail_free_mbuf_pool:
+	while (i--)
+		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+						 pool->elements[i].phys);
+	kfree(pool->elements);
+ fail_free_lpfc_mbuf_pool:
+	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
+ fail_free_dma_buf_pool:
+	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+	phba->lpfc_scsi_dma_buf_pool = NULL;
+ fail:
+	return -ENOMEM;
+}
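+
+/*
+ * Note: every pool created above is torn down again by lpfc_mem_free() /
+ * lpfc_mem_free_all() below; the error path in lpfc_mem_alloc() unwinds only
+ * the allocations that had already succeeded.
+ */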
+
+/**
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees the memory allocated by the lpfc_mem_alloc routine.
+ * This routine is the counterpart of lpfc_mem_alloc.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free(struct lpfc_hba *phba)
+{
+	int i;
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+	/* Free HBQ pools */
+	lpfc_sli_hbqbuf_free_all(phba);
+	if (phba->lpfc_drb_pool)
+		pci_pool_destroy(phba->lpfc_drb_pool);
+	phba->lpfc_drb_pool = NULL;
+	if (phba->lpfc_hrb_pool)
+		pci_pool_destroy(phba->lpfc_hrb_pool);
+	phba->lpfc_hrb_pool = NULL;
+
+	if (phba->lpfc_hbq_pool)
+		pci_pool_destroy(phba->lpfc_hbq_pool);
+	phba->lpfc_hbq_pool = NULL;
+
+	/* Free NLP memory pool */
+	mempool_destroy(phba->nlp_mem_pool);
+	phba->nlp_mem_pool = NULL;
+
+	/* Free mbox memory pool */
+	mempool_destroy(phba->mbox_mem_pool);
+	phba->mbox_mem_pool = NULL;
+
+	/* Free MBUF memory pool */
+	for (i = 0; i < pool->current_count; i++)
+		pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+			      pool->elements[i].phys);
+	kfree(pool->elements);
+
+	pci_pool_destroy(phba->lpfc_mbuf_pool);
+	phba->lpfc_mbuf_pool = NULL;
+
+	/* Free DMA buffer memory pool */
+	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+	phba->lpfc_scsi_dma_buf_pool = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees memory from the PCI and driver memory pools:
+ * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool and lpfc_hrb_pool.  Frees the
+ * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist.  Also frees
+ * the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mbox, *next_mbox;
+	struct lpfc_dmabuf   *mp;
+
+	/* Free memory used in mailbox queue back to mailbox memory pool */
+	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		list_del(&mbox->list);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
+	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		list_del(&mbox->list);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+	/* Free the active mailbox command back to the mailbox memory pool */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+	if (psli->mbox_active) {
+		mbox = psli->mbox_active;
+		mp = (struct lpfc_dmabuf *) (mbox->context1);
+		if (mp) {
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
+		mempool_free(mbox, phba->mbox_mem_pool);
+		psli->mbox_active = NULL;
+	}
+
+	/* Free and destroy all the allocated memory pools */
+	lpfc_mem_free(phba);
+
+	/* Free the iocb lookup array */
+	kfree(psli->iocbq_lookup);
+	psli->iocbq_lookup = NULL;
+
+	return;
+}
+
+/**
+ * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the mbuf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * Allocates from the generic pci_pool_alloc function first; if that fails
+ * and mem_flags has MEM_PRI set (the only defined flag), returns an mbuf
+ * from the HBA's safety pool.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.  Takes
+ * phba->hbalock.
+ *
+ * Returns:
+ *   pointer to the allocated mbuf on success
+ *   NULL on failure
+ **/
+void *
+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+	unsigned long iflags;
+	void *ret;
+
+	ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
+		pool->current_count--;
+		ret = pool->elements[pool->current_count].virt;
+		*handle = pool->elements[pool->current_count].phys;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
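+
+/*
+ * Illustrative usage sketch (not part of the driver): callers pair
+ * lpfc_mbuf_alloc() with lpfc_mbuf_free(), passing MEM_PRI when the
+ * allocation should fall back to the safety pool under memory pressure:
+ *
+ *	dma_addr_t phys;
+ *	void *virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
+ *
+ *	if (virt) {
+ *		... build the buffer payload at virt ...
+ *		lpfc_mbuf_free(phba, virt, phys);
+ *	}
+ */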
+
+/**
+ * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if that pool is
+ * below its max_count, otherwise frees the mbuf back to the lpfc_mbuf_pool.
+ *
+ * Notes: Must be called with phba->hbalock held to synchronize access to
+ * lpfc_mbuf_safety_pool.
+ *
+ * Returns: None
+ **/
+void
+__lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+
+	if (pool->current_count < pool->max_count) {
+		pool->elements[pool->current_count].virt = virt;
+		pool->elements[pool->current_count].phys = dma;
+		pool->current_count++;
+	} else {
+		pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
+	}
+	return;
+}
+
+/**
+ * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns an mbuf to the lpfc_mbuf_safety_pool if that pool is
+ * below its max_count, otherwise frees the mbuf back to the lpfc_mbuf_pool.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mbuf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbuf_free(phba, virt, dma);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+/**
+ * lpfc_els_hbq_alloc - Allocate an HBQ buffer
+ * @phba: HBA to allocate HBQ buffer for
+ *
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to HBQ on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_els_hbq_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *hbqbp;
+
+	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!hbqbp)
+		return NULL;
+
+	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+					  &hbqbp->dbuf.phys);
+	if (!hbqbp->dbuf.virt) {
+		kfree(hbqbp);
+		return NULL;
+	}
+	hbqbp->size = LPFC_BPL_SIZE;
+	return hbqbp;
+}
+
+/**
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * @phba: HBA buffer was allocated for
+ * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffer returned by
+ * lpfc_els_hbq_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
+{
+	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	kfree(hbqbp);
+	return;
+}
+
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to HBQ on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+	struct hbq_dmabuf *dma_buf;
+
+	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+	if (!dma_buf)
+		return NULL;
+
+	dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+					    &dma_buf->hbuf.phys);
+	if (!dma_buf->hbuf.virt) {
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+					    &dma_buf->dbuf.phys);
+	if (!dma_buf->dbuf.virt) {
+		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+			      dma_buf->hbuf.phys);
+		kfree(dma_buf);
+		return NULL;
+	}
+	dma_buf->size = LPFC_BPL_SIZE;
+	return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+	pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+	kfree(dmab);
+	return;
+}
+
+/**
+ * lpfc_in_buf_free - Free a DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way given if the
+ * HBA is running in SLI3 mode with HBQs enabled.
+ *
+ * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+	struct hbq_dmabuf *hbq_entry;
+	unsigned long flags;
+
+	if (!mp)
+		return;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		/* Check whether HBQ is still in use */
+		spin_lock_irqsave(&phba->hbalock, flags);
+		if (!phba->hbq_in_use) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return;
+		}
+		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+		list_del(&hbq_entry->dbuf.list);
+		if (hbq_entry->tag == -1) {
+			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+				(phba, hbq_entry);
+		} else {
+			lpfc_sli_free_hbq(phba, hbq_entry);
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+	} else {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+	return;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nl.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 0000000..f2b1bbc
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,179 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2010 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Event definitions for RegisterForEvent */
+#define FC_REG_LINK_EVENT		0x0001	/* link up / down events */
+#define FC_REG_RSCN_EVENT		0x0002	/* RSCN events */
+#define FC_REG_CT_EVENT			0x0004	/* CT request events */
+#define FC_REG_DUMP_EVENT		0x0010	/* Dump events */
+#define FC_REG_TEMPERATURE_EVENT	0x0020	/* temperature events */
+#define FC_REG_VPORTRSCN_EVENT		0x0040	/* Vport RSCN events */
+#define FC_REG_ELS_EVENT		0x0080	/* lpfc els events */
+#define FC_REG_FABRIC_EVENT		0x0100	/* lpfc fabric events */
+#define FC_REG_SCSI_EVENT		0x0200	/* lpfc scsi events */
+#define FC_REG_BOARD_EVENT		0x0400	/* lpfc board events */
+#define FC_REG_ADAPTER_EVENT		0x0800	/* lpfc adapter events */
+#define FC_REG_EVENT_MASK		(FC_REG_LINK_EVENT | \
+						FC_REG_RSCN_EVENT | \
+						FC_REG_CT_EVENT | \
+						FC_REG_DUMP_EVENT | \
+						FC_REG_TEMPERATURE_EVENT | \
+						FC_REG_VPORTRSCN_EVENT | \
+						FC_REG_ELS_EVENT | \
+						FC_REG_FABRIC_EVENT | \
+						FC_REG_SCSI_EVENT | \
+						FC_REG_BOARD_EVENT | \
+						FC_REG_ADAPTER_EVENT)
+/* Temperature events */
+#define LPFC_CRIT_TEMP		0x1
+#define LPFC_THRESHOLD_TEMP	0x2
+#define LPFC_NORMAL_TEMP	0x3
+/*
+ * All net link event payloads will begin with an event type
+ * and subcategory. The event type must come first.
+ * The subcategory further defines the data that follows in the rest
+ * of the payload. Each category will have its own unique header plus
+ * any additional data unique to the subcategory.
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
+/* RSCN event header */
+struct lpfc_rscn_event_header {
+	uint32_t event_type;
+	uint32_t payload_length; /* RSCN data length in bytes */
+	uint32_t rscn_payload[];
+};
+
+/* els event header */
+struct lpfc_els_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_ELS_EVENT */
+#define LPFC_EVENT_PLOGI_RCV		0x01
+#define LPFC_EVENT_PRLO_RCV		0x02
+#define LPFC_EVENT_ADISC_RCV		0x04
+#define LPFC_EVENT_LSRJT_RCV		0x08
+#define LPFC_EVENT_LOGO_RCV		0x10
+
+/* special els lsrjt event */
+struct lpfc_lsrjt_event {
+	struct lpfc_els_event_header header;
+	uint32_t command;
+	uint32_t reason_code;
+	uint32_t explanation;
+};
+
+/* special els logo event */
+struct lpfc_logo_event {
+	struct lpfc_els_event_header header;
+	uint8_t logo_wwpn[8];
+};
+
+/* fabric event header */
+struct lpfc_fabric_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_FABRIC_EVENT */
+#define LPFC_EVENT_FABRIC_BUSY		0x01
+#define LPFC_EVENT_PORT_BUSY		0x02
+#define LPFC_EVENT_FCPRDCHKERR		0x04
+
+/* special case fabric fcprdchkerr event */
+struct lpfc_fcprdchkerr_event {
+	struct lpfc_fabric_event_header header;
+	uint32_t lun;
+	uint32_t opcode;
+	uint32_t fcpiparam;
+};
+
+
+/* scsi event header */
+struct lpfc_scsi_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+	uint32_t lun;
+	uint8_t wwpn[8];
+	uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_SCSI_EVENT */
+#define LPFC_EVENT_QFULL	0x0001
+#define LPFC_EVENT_DEVBSY	0x0002
+#define LPFC_EVENT_CHECK_COND	0x0004
+#define LPFC_EVENT_LUNRESET	0x0008
+#define LPFC_EVENT_TGTRESET	0x0010
+#define LPFC_EVENT_BUSRESET	0x0020
+#define LPFC_EVENT_VARQUEDEPTH	0x0040
+
+/* special case scsi varqueuedepth event */
+struct lpfc_scsi_varqueuedepth_event {
+	struct lpfc_scsi_event_header scsi_event;
+	uint32_t oldval;
+	uint32_t newval;
+};
+
+/* special case scsi check condition event */
+struct lpfc_scsi_check_condition_event {
+	struct lpfc_scsi_event_header scsi_event;
+	uint8_t opcode;
+	uint8_t sense_key;
+	uint8_t asc;
+	uint8_t ascq;
+};
+
+/* event codes for FC_REG_BOARD_EVENT */
+#define LPFC_EVENT_PORTINTERR		0x01
+
+/* board event header */
+struct lpfc_board_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+};
+
+
+/* event codes for FC_REG_ADAPTER_EVENT */
+#define LPFC_EVENT_ARRIVAL	0x01
+
+/* adapter event header */
+struct lpfc_adapter_event_header {
+	uint32_t event_type;
+	uint32_t subcategory;
+};
+
+
+/* event codes for temp_event */
+#define LPFC_CRIT_TEMP		0x1
+#define LPFC_THRESHOLD_TEMP	0x2
+#define LPFC_NORMAL_TEMP	0x3
+
+struct temp_event {
+	uint32_t event_type;
+	uint32_t event_code;
+	uint32_t data;
+};
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nportdisc.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nportdisc.c
new file mode 100644
index 0000000..15ca2a9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -0,0 +1,2382 @@
+ /*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+
+/* Called to verify a rcv'ed ADISC was intended for us. */
+static int
+lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 struct lpfc_name *nn, struct lpfc_name *pn)
+{
+	/* First, we MUST have a RPI registered */
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+		return 0;
+
+	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
+	 * table entry for that node.
+	 */
+	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
+		return 0;
+
+	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
+		return 0;
+
+	/* we match, return success */
+	return 1;
+}
+
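+/*
+ * Validate the service parameters in a received PLOGI/FLOGI payload against
+ * our own.  Clamps the class 1/2/3 receive data field sizes and the
+ * buffer-to-buffer receive size to what this port advertises, then caches
+ * the remote WWNN/WWPN in the nodelist entry.  Returns 1 if the parameters
+ * are acceptable, 0 otherwise.
+ */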
+int
+lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		 struct serv_parm *sp, uint32_t class, int flogi)
+{
+	volatile struct serv_parm *hsp = &vport->fc_sparam;
+	uint16_t hsp_value, ssp_value = 0;
+
+	/*
+	 * The receive data field size and buffer-to-buffer receive data field
+	 * size entries are 16 bits but are represented as two 8-bit fields in
+	 * the driver data structure to account for rsvd bits and other control
+	 * bits.  Reconstruct and compare the fields as 16-bit values before
+	 * correcting the byte values.
+	 */
+	if (sp->cls1.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
+				     hsp->cls1.rcvDataSizeLsb);
+			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
+				     sp->cls1.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls1.rcvDataSizeLsb =
+					hsp->cls1.rcvDataSizeLsb;
+				sp->cls1.rcvDataSizeMsb =
+					hsp->cls1.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS1)
+		goto bad_service_param;
+	if (sp->cls2.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
+				     hsp->cls2.rcvDataSizeLsb);
+			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
+				     sp->cls2.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls2.rcvDataSizeLsb =
+					hsp->cls2.rcvDataSizeLsb;
+				sp->cls2.rcvDataSizeMsb =
+					hsp->cls2.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS2)
+		goto bad_service_param;
+	if (sp->cls3.classValid) {
+		if (!flogi) {
+			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
+				     hsp->cls3.rcvDataSizeLsb);
+			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
+				     sp->cls3.rcvDataSizeLsb);
+			if (!ssp_value)
+				goto bad_service_param;
+			if (ssp_value > hsp_value) {
+				sp->cls3.rcvDataSizeLsb =
+					hsp->cls3.rcvDataSizeLsb;
+				sp->cls3.rcvDataSizeMsb =
+					hsp->cls3.rcvDataSizeMsb;
+			}
+		}
+	} else if (class == CLASS3)
+		goto bad_service_param;
+
+	/*
+	 * Preserve the upper four bits of the MSB from the PLOGI response.
+	 * These bits contain the Buffer-to-Buffer State Change Number
+	 * from the target and need to be passed to the FW.
+	 */
+	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
+	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
+	if (ssp_value > hsp_value) {
+		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
+		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
+				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
+	}
+
+	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
+	return 1;
+bad_service_param:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0207 Device %x "
+			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
+			 "invalid service parameters.  Ignoring device.\n",
+			 ndlp->nlp_DID,
+			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
+			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
+			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
+			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
+	return 0;
+}
+
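+/*
+ * Return a pointer to the ELS response payload (just past the command word)
+ * of a completed ELS IOCB.  If the command DMA buffer was already cleared
+ * (e.g. by lpfc_els_abort), force a LOCAL_REJECT/SLI_ABORTED status (if none
+ * is already set) and return NULL.
+ */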
+static void *
+lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_dmabuf *pcmd, *prsp;
+	uint32_t *lp;
+	void     *ptr = NULL;
+	IOCB_t   *irsp;
+
+	irsp = &rspiocb->iocb;
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	/* For lpfc_els_abort, context2 could be zero'ed to delay
+	 * freeing associated memory till after ABTS completes.
+	 */
+	if (pcmd) {
+		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
+				       list);
+		if (prsp) {
+			lp = (uint32_t *) prsp->virt;
+			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
+		}
+	} else {
+		/* Force ulpStatus error since we are returning NULL ptr */
+		if (!(irsp->ulpStatus)) {
+			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
+			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+		}
+		ptr = NULL;
+	}
+	return ptr;
+}
+
+
+
+/*
+ * Free resources / clean up outstanding I/Os
+ * associated with a LPFC_NODELIST entry. This
+ * routine effectively results in a "software abort".
+ */
+int
+lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	LIST_HEAD(completions);
+	LIST_HEAD(txcmplq_completions);
+	LIST_HEAD(abort_list);
+	struct lpfc_sli  *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	/* Abort outstanding I/O on NPort <nlp_DID> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
+			 "2819 Abort outstanding I/O on NPort x%x "
+			 "Data: x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+
+	lpfc_fabric_abort_nport(ndlp);
+
+	/* First check the txq */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+		/* Check to see if iocb matches the nport we are looking for */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
+			/* It matches; dequeue and call compl with an error */
+			list_move_tail(&iocb->list, &completions);
+			pring->txq_cnt--;
+		}
+	}
+
+	/* Next check the txcmplq */
+	list_splice_init(&pring->txcmplq, &txcmplq_completions);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
+		/* Check to see if iocb matches the nport we are looking for */
+		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+			list_add_tail(&iocb->dlist, &abort_list);
+	}
+	spin_lock_irq(&phba->hbalock);
+	list_splice(&txcmplq_completions, &pring->txcmplq);
+	spin_unlock_irq(&phba->hbalock);
+
+	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
+		spin_lock_irq(&phba->hbalock);
+		list_del_init(&iocb->dlist);
+		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+
+	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+	return 0;
+}
+
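+/*
+ * Handle a received PLOGI: validate the service parameters, derive our
+ * NPortId in pt2pt mode, and queue a REG_LOGIN mailbox command so the ACC
+ * response can complete the login.  Returns 1 if the PLOGI is accepted,
+ * 0 if it is rejected.
+ */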
+static int
+lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	       struct lpfc_iocbq *cmdiocb)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba    *phba = vport->phba;
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	IOCB_t *icmd;
+	struct serv_parm *sp;
+	LPFC_MBOXQ_t *mbox;
+	struct ls_rjt stat;
+	int rc;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	if (vport->port_state <= LPFC_FDISC) {
+		/* Before responding to PLOGI, check for pt2pt mode.
+		 * If we are pt2pt, with an outstanding FLOGI, abort
+		 * the FLOGI and resend it first.
+		 */
+		if (vport->fc_flag & FC_PT2PT) {
+			lpfc_els_abort_flogi(phba);
+			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
+				/* If the other side is supposed to initiate
+				 * the PLOGI anyway, just ACC it now and
+				 * move on with discovery.
+				 */
+				phba->fc_edtov = FF_DEF_EDTOV;
+				phba->fc_ratov = FF_DEF_RATOV;
+				/* Start discovery - this should just do
+				   CLEAR_LA */
+				lpfc_disc_start(vport);
+			} else
+				lpfc_initial_flogi(vport);
+		} else {
+			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+					    ndlp, NULL);
+			return 0;
+		}
+	}
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0140 PLOGI Reject: invalid pname\n");
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0141 PLOGI Reject: invalid nname\n");
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
+		/* Reject this request because of invalid parameters */
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+		return 0;
+	}
+	icmd = &cmdiocb->iocb;
+
+	/* PLOGI chkparm OK */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
+			 ndlp->nlp_rpi);
+
+	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
+		ndlp->nlp_fcp_info |= CLASS2;
+	else
+		ndlp->nlp_fcp_info |= CLASS3;
+
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe =
+		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+	/* no need to reg_login if we are already in one of these states */
+	switch (ndlp->nlp_state) {
+	case  NLP_STE_NPR_NODE:
+		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
+			break;
+	case  NLP_STE_REG_LOGIN_ISSUE:
+	case  NLP_STE_PRLI_ISSUE:
+	case  NLP_STE_UNMAPPED_NODE:
+	case  NLP_STE_MAPPED_NODE:
+		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
+		return 1;
+	}
+
+	if ((vport->fc_flag & FC_PT2PT) &&
+	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+		/* rcv'ed PLOGI decides what our NPortId will be */
+		vport->fc_myDID = icmd->un.rcvels.parmRo;
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (mbox == NULL)
+			goto out;
+		lpfc_config_link(phba, mbox);
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->vport = vport;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			mempool_free(mbox, phba->mbox_mem_pool);
+			goto out;
+		}
+
+		lpfc_can_disctmo(vport);
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		goto out;
+
+	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_unreg_rpi(vport, ndlp);
+
+	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
+	if (rc) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+
+	/* ACC PLOGI rsp command needs to execute first,
+	 * queue this mbox command to be processed later.
+	 */
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+	/*
+	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
+	 * command issued in lpfc_cmpl_els_acc().
+	 */
+	mbox->vport = vport;
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+	spin_unlock_irq(shost->host_lock);
+
+	/*
+	 * If there is an outstanding PLOGI issued, abort it before
+	 * sending ACC rsp for received PLOGI. If pending plogi
+	 * is not canceled here, the plogi will be rejected by
+	 * remote port and will be retried. On a configuration with
+	 * single discovery thread, this will cause a huge delay in
+	 * discovery. Also this will cause multiple state machines
+	 * running in parallel for this node.
+	 */
+	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(phba, ndlp);
+	}
+
+	if ((vport->port_type == LPFC_NPIV_PORT &&
+	     vport->cfg_restrict_login)) {
+
+		/* In order to preserve RPIs, we want to cleanup
+		 * the default RPI the firmware created to rcv
+		 * this ELS request. The only way to do this is
+		 * to register, then unregister the RPI.
+		 */
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+		spin_unlock_irq(shost->host_lock);
+		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+			ndlp, mbox);
+		if (rc)
+			mempool_free(mbox, phba->mbox_mem_pool);
+		return 1;
+	}
+	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+	if (rc)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return 1;
+out:
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object
+ *
+ * This routine is invoked to issue a completion to a rcv'ed
+ * ADISC or PDISC after the paused RPI has been resumed.
+ **/
+static void
+lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint32_t cmd;
+
+	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
+	ndlp = (struct lpfc_nodelist *) mboxq->context2;
+	vport = mboxq->vport;
+	cmd = elsiocb->drvrTimeout;
+
+	if (cmd == ELS_CMD_ADISC) {
+		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
+	} else {
+		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
+			ndlp, NULL);
+	}
+	kfree(elsiocb);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+}
+
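+/*
+ * Handle a received ADISC or PDISC.  If the WWNN/WWPN match our nodelist
+ * entry the request is ACC'd (for SLI4 the paused RPI is resumed first via
+ * lpfc_sli4_resume_rpi); otherwise it is rejected and the node is placed in
+ * NPR state with a 1 second delayed PLOGI retry.
+ */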
+static int
+lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		struct lpfc_iocbq *cmdiocb)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq  *elsiocb;
+	struct lpfc_dmabuf *pcmd;
+	struct serv_parm   *sp;
+	struct lpfc_name   *pnn, *ppn;
+	struct ls_rjt stat;
+	ADISC *ap;
+	IOCB_t *icmd;
+	uint32_t *lp;
+	uint32_t cmd;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+
+	cmd = *lp++;
+	if (cmd == ELS_CMD_ADISC) {
+		ap = (ADISC *) lp;
+		pnn = (struct lpfc_name *) & ap->nodeName;
+		ppn = (struct lpfc_name *) & ap->portName;
+	} else {
+		sp = (struct serv_parm *) lp;
+		pnn = (struct lpfc_name *) & sp->nodeName;
+		ppn = (struct lpfc_name *) & sp->portName;
+	}
+
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+
+		/*
+		 * As soon as  we send ACC, the remote NPort can
+		 * start sending us data. Thus, for SLI4 we must
+		 * resume the RPI before the ACC goes out.
+		 */
+		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
+				GFP_KERNEL);
+			if (elsiocb) {
+
+				/* Save info from cmd IOCB used in rsp */
+				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
+					sizeof(struct lpfc_iocbq));
+
+				/* Save the ELS cmd */
+				elsiocb->drvrTimeout = cmd;
+
+				lpfc_sli4_resume_rpi(ndlp,
+					lpfc_mbx_cmpl_resume_rpi, elsiocb);
+				goto out;
+			}
+		}
+
+		if (cmd == ELS_CMD_ADISC) {
+			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
+		} else {
+			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
+				ndlp, NULL);
+		}
+out:
+		/* If we are authenticated, move to the proper state */
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+		else
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+		return 1;
+	}
+	/* Reject this request because of invalid parameters */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
+	/* 1 sec timeout */
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	return 0;
+}
+
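+/*
+ * Handle a received LOGO or PRLO.  ACCs the request, then either tears down
+ * the fabric login (re-instantiating the vlink via FDISC when other vlinks
+ * are still active) or arms a 1 second delayed PLOGI retry for non-fabric
+ * nodes, and finally moves the node to NPR state.
+ */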
+static int
+lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba    *phba = vport->phba;
+	struct lpfc_vport **vports;
+	int i, active_vlink_present = 0;
+
+	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
+	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
+	 * PLOGIs during LOGO storms from a device.
+	 */
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	if (els_cmd == ELS_CMD_PRLO)
+		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	else
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (ndlp->nlp_DID == Fabric_DID) {
+		if (vport->port_state <= LPFC_FDISC)
+			goto out;
+		lpfc_linkdown_port(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
+		spin_unlock_irq(shost->host_lock);
+		vports = lpfc_create_vport_work_array(phba);
+		if (vports) {
+			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+					i++) {
+				if ((!(vports[i]->fc_flag &
+					FC_VPORT_LOGO_RCVD)) &&
+					(vports[i]->port_state > LPFC_FDISC)) {
+					active_vlink_present = 1;
+					break;
+				}
+			}
+			lpfc_destroy_vport_work_array(phba, vports);
+		}
+
+		if (active_vlink_present) {
+			/*
+			 * If there are other active VLinks present,
+			 * re-instantiate the Vlink using FDISC.
+			 */
+			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_DELAY_TMO;
+			spin_unlock_irq(shost->host_lock);
+			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+			vport->port_state = LPFC_FDISC;
+		} else {
+			spin_lock_irq(shost->host_lock);
+			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_retry_pport_discovery(phba);
+		}
+	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+		((ndlp->nlp_type & NLP_FCP_TARGET) ||
+		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+		/* Only try to re-login if this is NOT a Fabric Node */
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	}
+out:
+	ndlp->nlp_prev_state = ndlp->nlp_state;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	spin_unlock_irq(shost->host_lock);
+	/* The driver has to wait until the ACC completes before it continues
+	 * processing the LOGO.  The action will resume in
+	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
+	 * unreg_login, the driver waits so the ACC does not get aborted.
+	 */
+	return 0;
+}
+
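+/*
+ * Parse a received PRLI payload and update the node's FCP initiator/target
+ * role bits and FCP-2 retry capability, then propagate any role change to
+ * the fc transport rport.
+ */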
+static void
+lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+	      struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_dmabuf *pcmd;
+	uint32_t *lp;
+	PRLI *npr;
+	struct fc_rport *rport = ndlp->rport;
+	u32 roles;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	lp = (uint32_t *) pcmd->virt;
+	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+
+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	if (npr->prliType == PRLI_FCP_TYPE) {
+		if (npr->initiatorFunc)
+			ndlp->nlp_type |= NLP_FCP_INITIATOR;
+		if (npr->targetFunc)
+			ndlp->nlp_type |= NLP_FCP_TARGET;
+		if (npr->Retry)
+			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+	}
+	if (rport) {
+		/* We need to update the rport role values */
+		roles = FC_RPORT_ROLE_UNKNOWN;
+		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		if (ndlp->nlp_type & NLP_FCP_TARGET)
+			roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+			"rport rolechg:   role:x%x did:x%x flg:x%x",
+			roles, ndlp->nlp_DID, ndlp->nlp_flag);
+
+		fc_remote_port_rolechg(rport, roles);
+	}
+}
+
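+/*
+ * Decide whether this node should be authenticated with ADISC on the next
+ * discovery cycle.  Returns 1 and sets NLP_NPR_ADISC when ADISC can be used
+ * (RPI registered, not pt2pt, and either cfg_use_adisc during an RSCN or an
+ * FCP-2 target); otherwise clears the flag, unregisters the RPI and
+ * returns 0.
+ */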
+static uint32_t
+lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		return 0;
+	}
+
+	if (!(vport->fc_flag & FC_PT2PT)) {
+		/* Check config parameter use-adisc or FCP-2 */
+		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
+		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag |= NLP_NPR_ADISC;
+			spin_unlock_irq(shost->host_lock);
+			return 1;
+		}
+	}
+	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+	lpfc_unreg_rpi(vport, ndlp);
+	return 0;
+}
+
+/**
+ * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd.
+ * @phba : Pointer to lpfc_hba structure.
+ * @vport: Pointer to lpfc_vport structure.
+ * @rpi  : rpi to be released.
+ *
+ * This function sends an unreg_login mailbox command to the firmware
+ * to release an rpi.
+ **/
+void
+lpfc_release_rpi(struct lpfc_hba *phba,
+		struct lpfc_vport *vport,
+		uint16_t rpi)
+{
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+			GFP_KERNEL);
+	if (!pmb)
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+			"2796 mailbox memory allocation failed\n");
+	else {
+		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(pmb, phba->mbox_mem_pool);
+	}
+}
+
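+/*
+ * Default handler for state/event combinations that should never occur.
+ * Releases the RPI if an unexpected REG_LOGIN completed successfully and
+ * logs the illegal transition, leaving the node state unchanged.
+ */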
+static uint32_t
+lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		  void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t *mb;
+	uint16_t rpi;
+
+	phba = vport->phba;
+	/* Release the RPI if reglogin completing */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
+		(!pmb->u.mb.mbxStatus)) {
+		mb = &pmb->u.mb;
+		rpi = pmb->u.mb.un.varWords[0];
+		lpfc_release_rpi(phba, vport, rpi);
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0271 Illegal State Transition: node x%x "
+			 "event x%x, state x%x Data: x%x x%x\n",
+			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		  void *arg, uint32_t evt)
+{
+	/* This transition is only legal if we previously
+	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
+	 * working on the same NPortID, do nothing for this thread
+	 * to stop it.
+	 */
+	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+			 "0272 Illegal State Transition: node x%x "
+			 "event x%x, state x%x Data: x%x x%x\n",
+			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
+			 ndlp->nlp_flag);
+	}
+	return ndlp->nlp_state;
+}
+
+/* Start of Discovery State Machine routines */
+
+static uint32_t
+lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		return ndlp->nlp_state;
+	}
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	lpfc_issue_els_logo(vport, ndlp, 0);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+			struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = arg;
+	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+	uint32_t *lp = (uint32_t *) pcmd->virt;
+	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
+	struct ls_rjt stat;
+	int port_cmp;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+
+	/* For a PLOGI, we only accept if our portname is less
+	 * than the remote portname.
+	 */
+	phba->fc_stat.elsLogiCol++;
+	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
+			  sizeof(struct lpfc_name));
+
+	if (port_cmp >= 0) {
+		/* Reject this request because the remote node will accept
+		   ours */
+		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+			NULL);
+	} else {
+		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+		    (vport->num_disc_nodes)) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			/* Check if there are more PLOGIs to be sent */
+			lpfc_more_plogi(vport);
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
+			}
+		}
+	} /* If our portname was less */
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(vport->phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(phba, ndlp);
+
+	if (evt == NLP_EVT_RCV_LOGO) {
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	} else {
+		lpfc_issue_els_logo(vport, ndlp, 0);
+	}
+
+	/* Put ndlp in npr state set plogi timer for 1 sec */
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
+	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg,
+			    uint32_t evt)
+{
+	struct lpfc_hba    *phba = vport->phba;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq  *cmdiocb, *rspiocb;
+	struct lpfc_dmabuf *pcmd, *prsp, *mp;
+	uint32_t *lp;
+	IOCB_t *irsp;
+	struct serv_parm *sp;
+	LPFC_MBOXQ_t *mbox;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+		/* Recovery from PLOGI collision logic */
+		return ndlp->nlp_state;
+	}
+
+	irsp = &rspiocb->iocb;
+
+	if (irsp->ulpStatus)
+		goto out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+
+	lp = (uint32_t *) prsp->virt;
+	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+
+	/* Some switches have FDMI servers returning 0 for WWN */
+	if ((ndlp->nlp_DID != FDMI_DID) &&
+		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
+		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0142 PLOGI RSP: Invalid WWN.\n");
+		goto out;
+	}
+	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
+		goto out;
+	/* PLOGI chkparm OK */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, ndlp->nlp_state,
+			 ndlp->nlp_flag, ndlp->nlp_rpi);
+	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
+		ndlp->nlp_fcp_info |= CLASS2;
+	else
+		ndlp->nlp_fcp_info |= CLASS3;
+
+	ndlp->nlp_class_sup = 0;
+	if (sp->cls1.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS1;
+	if (sp->cls2.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS2;
+	if (sp->cls3.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS3;
+	if (sp->cls4.classValid)
+		ndlp->nlp_class_sup |= FC_COS_CLASS4;
+	ndlp->nlp_maxframe =
+		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+			"0133 PLOGI: no memory for reg_login "
+			"Data: x%x x%x x%x x%x\n",
+			ndlp->nlp_DID, ndlp->nlp_state,
+			ndlp->nlp_flag, ndlp->nlp_rpi);
+		goto out;
+	}
+
+	lpfc_unreg_rpi(vport, ndlp);
+
+	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
+		switch (ndlp->nlp_DID) {
+		case NameServer_DID:
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+			break;
+		case FDMI_DID:
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
+			break;
+		default:
+			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+		}
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+		    != MBX_NOT_FINISHED) {
+			lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_REG_LOGIN_ISSUE);
+			return ndlp->nlp_state;
+		}
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+		/* decrement node reference count to the failed mbox
+		 * command
+		 */
+		lpfc_nlp_put(ndlp);
+		mp = (struct lpfc_dmabuf *) mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0134 PLOGI: cannot issue reg_login "
+				 "Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_state,
+				 ndlp->nlp_flag, ndlp->nlp_rpi);
+	} else {
+		mempool_free(mbox, phba->mbox_mem_pool);
+
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0135 PLOGI: cannot format reg_login "
+				 "Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, ndlp->nlp_state,
+				 ndlp->nlp_flag, ndlp->nlp_rpi);
+	}
+
+
+out:
+	if (ndlp->nlp_DID == NameServer_DID) {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0261 Cannot Register NameServer login\n");
+	}
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_DEFER_RM;
+	spin_unlock_irq(shost->host_lock);
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
+	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba;
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint16_t rpi;
+
+	phba = vport->phba;
+	/* Release the RPI */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+		!mb->mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		lpfc_release_rpi(phba, vport, rpi);
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding PLOGI */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+			spin_unlock_irq(shost->host_lock);
+			if (vport->num_disc_nodes)
+				lpfc_more_adisc(vport);
+		}
+		return ndlp->nlp_state;
+	}
+	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* Treat like rcv logo */
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+	ADISC *ap;
+	int rc;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+	irsp = &rspiocb->iocb;
+
+	if ((irsp->ulpStatus) ||
+	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
+		/* 1 sec timeout */
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
+		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
+
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		lpfc_unreg_rpi(vport, ndlp);
+		return ndlp->nlp_state;
+	}
+
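+	/* On SLI4 the RPI must be resumed before the node can be moved to a
+	 * mapped/unmapped state; if the resume fails, stay in ADISC_ISSUE so
+	 * the operation can be retried.
+	 */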
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
+		if (rc) {
+			/* Stay in state and retry. */
+			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+			return ndlp->nlp_state;
+		}
+	}
+
+	if (ndlp->nlp_type & NLP_FCP_TARGET) {
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+	} else {
+		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding ADISC */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding ADISC */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	LPFC_MBOXQ_t	  *mb;
+	LPFC_MBOXQ_t	  *nextmb;
+	struct lpfc_dmabuf *mp;
+
+	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
+	if ((mb = phba->sli.mbox_active)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			lpfc_nlp_put(ndlp);
+			mb->context2 = NULL;
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			lpfc_nlp_put(ndlp);
+			list_del(&mb->list);
+			phba->sli.mboxq_cnt--;
+			mempool_free(mb, phba->mbox_mem_pool);
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
+			       struct lpfc_nodelist *ndlp,
+			       void *arg,
+			       uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+				  struct lpfc_nodelist *ndlp,
+				  void *arg,
+				  uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t *mb = &pmb->u.mb;
+	uint32_t did  = mb->un.varWords[1];
+
+	if (mb->mbxStatus) {
+		/* RegLogin failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+				"0246 RegLogin failed Data: x%x x%x x%x x%x "
+				 "x%x\n",
+				 did, mb->mbxStatus, vport->port_state,
+				 mb->un.varRegLogin.vpi,
+				 mb->un.varRegLogin.rpi);
+		/*
+		 * If RegLogin failed due to lack of HBA resources do not
+		 * retry discovery.
+		 */
+		if (mb->mbxStatus == MBXERR_RPI_FULL) {
+			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+			return ndlp->nlp_state;
+		}
+
+		/* Put ndlp in NPR state; set plogi timer for 1 sec */
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+
+		lpfc_issue_els_logo(vport, ndlp, 0);
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		return ndlp->nlp_state;
+	}
+
+	/* SLI4 ports have preallocated logical rpis. */
+	if (vport->phba->sli_rev < LPFC_SLI_REV4)
+		ndlp->nlp_rpi = mb->un.varWords[0];
+
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+
+	/* Only if we are not a fabric nport do we issue PRLI */
+	if (!(ndlp->nlp_type & NLP_FABRIC)) {
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+		lpfc_issue_els_prli(vport, ndlp, 0);
+	} else {
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+static uint32_t
+lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+				 struct lpfc_nodelist *ndlp,
+				 void *arg,
+				 uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* Software abort outstanding PRLI before sending acc */
+	lpfc_els_abort(vport->phba, ndlp);
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+/* This routine is invoked when we receive a PRLO request from an nport
+ * we are logged into.  We should send back a PRLO rsp setting the
+ * appropriate bits.
+ * NEXT STATE = PRLI_ISSUE
+ */
+static uint32_t
+lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	struct lpfc_hba   *phba = vport->phba;
+	IOCB_t *irsp;
+	PRLI *npr;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus) {
+		if ((vport->port_type == LPFC_NPIV_PORT) &&
+		    vport->cfg_restrict_login) {
+			goto out;
+		}
+		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+		return ndlp->nlp_state;
+	}
+
+	/* Check out PRLI rsp */
+	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+	    (npr->prliType == PRLI_FCP_TYPE)) {
+		if (npr->initiatorFunc)
+			ndlp->nlp_type |= NLP_FCP_INITIATOR;
+		if (npr->targetFunc)
+			ndlp->nlp_type |= NLP_FCP_TARGET;
+		if (npr->Retry)
+			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+	}
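+	/* On an NPIV port with restrict_login set, a node that did not report
+	 * FCP target capability is logged out and moved back to NPR instead
+	 * of being left unmapped.
+	 */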
+	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
+	    (vport->port_type == LPFC_NPIV_PORT) &&
+	     vport->cfg_restrict_login) {
+out:
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_issue_els_logo(vport, ndlp, 0);
+
+		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		return ndlp->nlp_state;
+	}
+
+	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+	if (ndlp->nlp_type & NLP_FCP_TARGET)
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+	else
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+	return ndlp->nlp_state;
+}
+
+/*! lpfc_device_rm_prli_issue
+ *
+ * \pre
+ * \post
+ * \param   vport
+ * \param   ndlp
+ * \param   arg
+ * \param   evt
+ * \return  uint32_t
+ *
+ * \b Description:
+ *    This routine is invoked when we receive a request to remove an nport we
+ *    are in the process of PRLIing. We should software abort the outstanding
+ *    PRLI, unreg the login and send a logout. We will change the node state
+ *    to UNUSED_NODE and put it on the plogi list so it can be freed when the
+ *    LOGO completes.
+ *
+ */
+
+static uint32_t
+lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	} else {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(vport->phba, ndlp);
+
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+}
+
+
+/*! lpfc_device_recov_prli_issue
+ *
+ * \pre
+ * \post
+ * \param   vport
+ * \param   ndlp
+ * \param   arg
+ * \param   evt
+ * \return  uint32_t
+ *
+ * \b Description:
+ *    This routine is invoked when the state of a device is unknown, such as
+ *    during a link down. We should remove the nodelist entry from the
+ *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
+ *    outstanding PRLI command, then free the node entry.
+ */
+static uint32_t
+lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba  *phba = vport->phba;
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	/* software abort outstanding PRLI */
+	lpfc_els_abort(phba, ndlp);
+
+	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_prli(vport, ndlp, cmdiocb);
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg,
+			     uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	/* flush the target */
+	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+
+	/* Treat like rcv logo */
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+			      struct lpfc_nodelist *ndlp,
+			      void *arg,
+			      uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
+
+	/* Ignore PLOGI if we have an outstanding LOGO */
+	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
+		return ndlp->nlp_state;
+	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
+		spin_unlock_irq(shost->host_lock);
+	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		/* send PLOGI immediately, move to PLOGI issue state */
+		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+	struct ls_rjt     stat;
+
+	memset(&stat, 0, sizeof (struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+
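+	/* The PRLI was rejected above; if no delayed-retry timer is pending,
+	 * restart discovery for this node with ADISC or PLOGI as appropriate.
+	 */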
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+			spin_lock_irq(shost->host_lock);
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			spin_unlock_irq(shost->host_lock);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+		} else {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
+	/*
+	 * Do not start discovery if discovery is about to start
+	 * or is already in progress for this node. Starting discovery
+	 * here will affect the counting of discovery threads.
+	 */
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+			lpfc_issue_els_adisc(vport, ndlp, 0);
+		} else {
+			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+		       void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag |= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+
+	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
+		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_DELAY_TMO;
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
+		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+	} else {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+		spin_unlock_irq(shost->host_lock);
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus) {
+		ndlp->nlp_flag |= NLP_DEFER_RM;
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	if (ndlp->nlp_DID == Fabric_DID) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
+	}
+	lpfc_unreg_rpi(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
+
+	cmdiocb = (struct lpfc_iocbq *) arg;
+	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_drop_node(vport, ndlp);
+		return NLP_STE_FREED_NODE;
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+			    struct lpfc_nodelist *ndlp,
+			    void *arg, uint32_t evt)
+{
+	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+	MAILBOX_t    *mb = &pmb->u.mb;
+
+	if (!mb->mbxStatus) {
+		/* SLI4 ports have preallocated logical rpis. */
+		if (vport->phba->sli_rev < LPFC_SLI_REV4)
+			ndlp->nlp_rpi = mb->un.varWords[0];
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+	} else {
+		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+			lpfc_drop_node(vport, ndlp);
+			return NLP_STE_FREED_NODE;
+		}
+	}
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		spin_unlock_irq(shost->host_lock);
+		return ndlp->nlp_state;
+	}
+	lpfc_drop_node(vport, ndlp);
+	return NLP_STE_FREED_NODE;
+}
+
+static uint32_t
+lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* Don't do anything that will mess up processing of the
+	 * previous RSCN.
+	 */
+	if (vport->fc_flag & FC_RSCN_DEFERRED)
+		return ndlp->nlp_state;
+
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	return ndlp->nlp_state;
+}
+
+
+/* This next section defines the NPort Discovery State Machine */
+
+/* There are 4 different doubly linked lists that nodelist entries can reside on.
+ * The plogi list and adisc list are used when Link Up discovery or RSCN
+ * processing is needed. Each list holds the nodes that we will send PLOGI
+ * or ADISC on. These lists will keep track of what nodes will be affected
+ * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
+ * The unmapped_list will contain all nodes that we have successfully logged
+ * into at the Fibre Channel level. The mapped_list will contain all nodes
+ * that are mapped FCP targets.
+ */
+/*
+ * The bind list is a list of undiscovered (potentially non-existent) nodes
+ * that we have saved binding information on. This information is used when
+ * nodes transition from the unmapped to the mapped list.
+ */
+/* For UNUSED_NODE state, the node has just been allocated.
+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
+ * and put on the unmapped list. For ADISC processing, the node is taken off
+ * the ADISC list and placed on either the mapped or unmapped list (depending
+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
+ * node, the node is taken off the unmapped list. The binding list is checked
+ * for a valid binding, or a binding is automatically assigned. If binding
+ * assignment is unsuccessful, the node is left on the unmapped list. If
+ * binding assignment is successful, the associated binding list entry (if
+ * any) is removed, and the node is placed on the mapped list.
+ */
+/*
+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
+ * expire, all affected nodes will receive a DEVICE_RM event.
+ */
+/*
+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
+ * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
+ * we will first process the ADISC list.  32 entries are processed initially and
+ * ADISC is initiated for each one.  Completions / Events for each node are
+ * funneled through the state machine.  As each node finishes ADISC processing,
+ * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
+ * waiting, and the ADISC list count is identically 0, then we are done. For
+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
+ * list.  32 entries are processed initially and PLOGI is initiated for each one.
+ * Completions / Events for each node are funneled through the state machine.
+ * As each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
+ * identically 0, then we are done. We have now completed discovery / RSCN
+ * handling. Upon completion, ALL nodes should be on either the mapped or
+ * unmapped lists.
+ */
+
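+/* The jump table below is indexed by (current node state * NLP_EVT_MAX_EVENT)
+ * + event; each group of NLP_EVT_MAX_EVENT entries covers the full event set
+ * for one node state (see lpfc_disc_state_machine below).
+ */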
+static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
+     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
+	/* Action routine                  Event       Current State  */
+	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
+	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
+	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
+	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
+	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
+	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
+	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
+	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
+	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
+	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
+	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
+	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
+	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
+	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_disc_illegal,		/* DEVICE_RM       */
+	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
+	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
+	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
+	lpfc_disc_illegal,		/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_disc_illegal,		/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_disc_illegal,		/* DEVICE_RM       */
+	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
+
+	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
+	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
+	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
+	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
+	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
+	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
+	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
+	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
+	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
+	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
+	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
+	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
+	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
+};
+
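+/* Main entry point for the discovery state machine: take a reference on the
+ * ndlp, look up the action routine for the current state and event, invoke
+ * it, log the resulting state and drop the reference.
+ */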
+int
+lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			void *arg, uint32_t evt)
+{
+	uint32_t cur_state, rc;
+	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
+			 uint32_t);
+	uint32_t got_ndlp = 0;
+
+	if (lpfc_nlp_get(ndlp))
+		got_ndlp = 1;
+
+	cur_state = ndlp->nlp_state;
+
+	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0211 DSM in event x%x on NPort x%x in "
+			 "state %d Data: x%x\n",
+			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+		 "DSM in:          evt:%d ste:%d did:x%x",
+		evt, cur_state, ndlp->nlp_DID);
+
+	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
+	rc = (func) (vport, ndlp, arg, evt);
+
+	/* DSM out state <rc> on NPort <nlp_DID> */
+	if (got_ndlp) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
+			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+			"DSM out:         ste:%d did:x%x flg:x%x",
+			rc, ndlp->nlp_DID, ndlp->nlp_flag);
+		/* Decrement the ndlp reference count held for this function */
+		lpfc_nlp_put(ndlp);
+	} else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			"0213 DSM out state %d on NPort free\n", rc);
+
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+			"DSM out:         ste:%d did:x%x flg:x%x",
+			rc, 0, 0);
+	}
+
+	return rc;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.c
new file mode 100644
index 0000000..88f3a83
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.c
@@ -0,0 +1,5031 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+
+#define LPFC_RESET_WAIT  2
+#define LPFC_ABORT_WAIT  2
+
+int _dump_buf_done;
+
+static char *dif_op_str[] = {
+	"PROT_NORMAL",
+	"PROT_READ_INSERT",
+	"PROT_WRITE_STRIP",
+	"PROT_READ_STRIP",
+	"PROT_WRITE_INSERT",
+	"PROT_READ_PASS",
+	"PROT_WRITE_PASS",
+};
+
+static char *dif_grd_str[] = {
+	"NO_GUARD",
+	"DIF_CRC",
+	"DIX_IP",
+};
+
+struct scsi_dif_tuple {
+	__be16 guard_tag;       /* Checksum */
+	__be16 app_tag;         /* Opaque storage */
+	__be32 ref_tag;         /* Target LBA or indirect LBA */
+};
+
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
+
+static void
+lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_sglist(cmnd);
+
+	if (!_dump_buf_data) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
+				__func__);
+		return;
+	}
+
+
+	if (!sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9051 BLKGRD: ERROR: data scatterlist is null\n");
+		return;
+	}
+
+	dst = (void *) _dump_buf_data;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+static void
+lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
+{
+	void *src, *dst;
+	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+	if (!_dump_buf_dif) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
+				__func__);
+		return;
+	}
+
+	if (!sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
+		return;
+	}
+
+	dst = _dump_buf_dif;
+	while (sgde) {
+		src = sg_virt(sgde);
+		memcpy(dst, src, sgde->length);
+		dst += sgde->length;
+		sgde = sg_next(sgde);
+	}
+}
+
+/**
+ * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called from the lpfc_prep_task_mgmt_cmd function to
+ * set the last bit in the response sge entry.
+ **/
+static void
+lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+				struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	if (sgl) {
+		sgl += 1;
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+}
+
+/**
+ * lpfc_update_stats - Update statistical data for the command completion
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called on command completion and updates the
+ * statistical (latency bucket) data for the completed command.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
+{
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	unsigned long flags;
+	struct Scsi_Host  *shost = cmd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	unsigned long latency;
+	int i;
+
+	if (cmd->result)
+		return;
+
+	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (!vport->stat_data_enabled ||
+		vport->stat_data_blocked ||
+		!pnode ||
+		!pnode->lat_data ||
+		(phba->bucket_type == LPFC_NO_BUCKET)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return;
+	}
+
+	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+			phba->bucket_step;
+		/* check array subscript bounds */
+		if (i < 0)
+			i = 0;
+		else if (i >= LPFC_MAX_BUCKET_COUNT)
+			i = LPFC_MAX_BUCKET_COUNT - 1;
+	} else {
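+		/* Exponential buckets: pick the first bucket whose upper
+		 * bound (bucket_base + (1 << i) * bucket_step) covers this
+		 * latency.
+		 */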
+		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+			if (latency <= (phba->bucket_base +
+				((1<<i)*phba->bucket_step)))
+				break;
+	}
+
+	pnode->lat_data[i].cmd_count++;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+		struct lpfc_vport  *vport,
+		struct lpfc_nodelist *ndlp,
+		uint32_t lun,
+		uint32_t old_val,
+		uint32_t new_val)
+{
+	struct lpfc_fast_path_event *fast_path_evt;
+	unsigned long flags;
+
+	fast_path_evt = lpfc_alloc_fast_evt(phba);
+	if (!fast_path_evt)
+		return;
+
+	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+		FC_REG_SCSI_EVENT;
+	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+		LPFC_EVENT_VARQUEDEPTH;
+
+	/* Report all luns with change in queue depth */
+	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+			&ndlp->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
+	}
+
+	fast_path_evt->un.queue_depth_evt.oldval = old_val;
+	fast_path_evt->un.queue_depth_evt.newval = new_val;
+	fast_path_evt->vport = vport;
+
+	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+
+	return;
+}
+
+/**
+ * lpfc_change_queue_depth - Alter scsi device queue depth
+ * @sdev: Pointer to the scsi device on which to change the queue depth.
+ * @qdepth: New queue depth to set the sdev to.
+ * @reason: The reason for the queue depth change.
+ *
+ * This function is called by the midlayer and the LLD to alter the queue
+ * depth for a scsi device. This function sets the queue depth to the new
+ * value and sends an event out to log the queue depth change.
+ **/
+int
+lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_rport_data *rdata;
+	unsigned long new_queue_depth, old_queue_depth;
+
+	old_queue_depth = sdev->queue_depth;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	new_queue_depth = sdev->queue_depth;
+	rdata = sdev->hostdata;
+	if (rdata)
+		lpfc_send_sdev_queuedepth_change_event(phba, vport,
+						       rdata->pnode, sdev->lun,
+						       old_queue_depth,
+						       new_queue_depth);
+	return sdev->queue_depth;
+}
+
+/**
+ * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at
+ * most one event each second, and wakes up the worker thread of @phba to
+ * process the WORKER_RAMP_DOWN_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
+void
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+{
+	unsigned long flags;
+	uint32_t evt_posted;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	atomic_inc(&phba->num_rsrc_err);
+	phba->last_rsrc_error_time = jiffies;
+
+	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+
+	phba->last_ramp_down_time = jiffies;
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+	if (!evt_posted)
+		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
+ * @vport: The virtual port for which this call is being executed.
+ * @queue_depth: Current queue depth of the scsi device.
+ *
+ * This routine posts a WORKER_RAMP_UP_QUEUE event for the hba of @vport. It
+ * posts at most one event every 5 minutes after last_ramp_up_time or
+ * last_rsrc_error_time, and wakes up the worker thread to process the
+ * WORKER_RAMP_UP_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
+			uint32_t queue_depth)
+{
+	unsigned long flags;
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t evt_posted;
+	atomic_inc(&phba->num_cmd_success);
+
+	if (vport->cfg_lun_queue_depth <= queue_depth)
+		return;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (time_before(jiffies,
+			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
+	    time_before(jiffies,
+			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
+		spin_unlock_irqrestore(&phba->hbalock, flags);
+		return;
+	}
+	phba->last_ramp_up_time = jiffies;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+	if (!evt_posted)
+		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	if (!evt_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
+ * worker thread. It reduces the queue depth for all scsi devices on each
+ * vport associated with @phba.
+ **/
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	unsigned long new_queue_depth;
+	unsigned long num_rsrc_err, num_cmd_success;
+	int i;
+
+	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+	num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
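+				/* Scale the depth down in proportion to the
+				 * share of resource errors seen, dropping by
+				 * at least one.
+				 */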
+				new_queue_depth =
+					sdev->queue_depth * num_rsrc_err /
+					(num_rsrc_err + num_cmd_success);
+				if (!new_queue_depth)
+					new_queue_depth = sdev->queue_depth - 1;
+				else
+					new_queue_depth = sdev->queue_depth -
+								new_queue_depth;
+				lpfc_change_queue_depth(sdev, new_queue_depth,
+							SCSI_QDEPTH_DEFAULT);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+/**
+ * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
+ * worker thread. It increases the queue depth of every scsi device on each
+ * vport associated with @phba by 1. This routine also sets @phba num_rsrc_err and
+ * num_cmd_success to zero.
+ **/
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
+				if (vports[i]->cfg_lun_queue_depth <=
+				    sdev->queue_depth)
+					continue;
+				lpfc_change_queue_depth(sdev,
+							sdev->queue_depth+1,
+							SCSI_QDEPTH_RAMP_UP);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	atomic_set(&phba->num_rsrc_err, 0);
+	atomic_set(&phba->num_cmd_success, 0);
+}
+
+/**
+ * lpfc_scsi_dev_block - set all scsi hosts to block state
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to the block
+ * state by invoking the fc_remote_port_delete() routine. It is invoked by
+ * EEH when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	struct Scsi_Host  *shost;
+	struct scsi_device *sdev;
+	struct fc_rport *rport;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+			shost = lpfc_shost_from_vport(vports[i]);
+			shost_for_each_device(sdev, shost) {
+				rport = starget_to_rport(scsi_target(sdev));
+				fc_remote_port_delete(rport);
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for a device with the SLI-3 interface
+ * spec. The scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct ulp_bde64 *bpl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl;
+	uint16_t iotag;
+	int bcnt;
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+
+		/*
+		 * Get memory from the pci pool to map the virt space to pci
+		 * bus space for an I/O.  The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+					GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Initialize virtual ptrs to dma_buf region. */
+		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+					psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_cmnd = psb->data;
+		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
+
+		/* Initialize local short-hand pointers. */
+		bpl = psb->fcp_bpl;
+		pdma_phys_fcp_cmd = psb->dma_handle;
+		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp);
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+		 * are sg list bdes.  Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+		/* Setup the physical region for the FCP RSP */
+		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		if ((phba->sli_rev == 3) &&
+				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+			/* fill in immediate fcp command BDE */
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+					unsli3.fcp_ext.icd);
+			iocb->un.fcpi64.bdl.addrHigh = 0;
+			iocb->ulpBdeCount = 0;
+			iocb->ulpLe = 0;
+			/* fill in response BDE */
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+							BUFF_TYPE_BDE_64;
+			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+				sizeof(struct fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrLow =
+				putPaddrLow(pdma_phys_fcp_rsp);
+			iocb->unsli3.fcp_ext.rbde.addrHigh =
+				putPaddrHigh(pdma_phys_fcp_rsp);
+		} else {
+			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+			iocb->un.fcpi64.bdl.bdeSize =
+					(2 * sizeof(struct ulp_bde64));
+			iocb->un.fcpi64.bdl.addrLow =
+					putPaddrLow(pdma_phys_bpl);
+			iocb->un.fcpi64.bdl.addrHigh =
+					putPaddrHigh(pdma_phys_bpl);
+			iocb->ulpBdeCount = 1;
+			iocb->ulpLe = 1;
+		}
+		iocb->ulpClass = CLASS3;
+		psb->status = IOSTAT_SUCCESS;
+		/* Put it back into the SCSI buffer list */
+		psb->cur_iocbq.context1  = psb;
+		lpfc_release_scsi_buf_s3(phba, psb);
+
+	}
+
+	return bcnt;
+}
+
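+/*
+ * For reference: lpfc_new_scsi_buf_s3() carves each pci_pool allocation into
+ * three back-to-back regions (virtual and DMA offsets are identical):
+ *
+ *   psb->data / psb->dma_handle
+ *   +----------------+---------------+---------------------------------+
+ *   |  struct        |  struct       |  BPL: bde[0] = CMND,            |
+ *   |  fcp_cmnd      |  fcp_rsp      |  bde[1] = RSP, bde[2..] = data  |
+ *   +----------------+---------------+---------------------------------+
+ *
+ * Only the first two BPL entries are initialized above; the data BDEs are
+ * filled in per I/O by lpfc_scsi_prep_dma_buf_s3().
+ */
+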
+/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_for_each_entry_safe(psb, next_psb,
+				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->rdata && psb->rdata->pnode
+			&& psb->rdata->pnode->vport == vport)
+			psb->rdata = NULL;
+	}
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+			  struct sli4_wcqe_xri_aborted *axri)
+{
+	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+	struct lpfc_scsi_buf *psb, *next_psb;
+	unsigned long iflag = 0;
+	struct lpfc_iocbq *iocbq;
+	int i;
+	struct lpfc_nodelist *ndlp;
+	int rrq_empty = 0;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	list_for_each_entry_safe(psb, next_psb,
+		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+		if (psb->cur_iocbq.sli4_xritag == xri) {
+			list_del(&psb->list);
+			psb->exch_busy = 0;
+			psb->status = IOSTAT_SUCCESS;
+			spin_unlock(
+				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			if (psb->rdata && psb->rdata->pnode)
+				ndlp = psb->rdata->pnode;
+			else
+				ndlp = NULL;
+
+			rrq_empty = list_empty(&phba->active_rrq_list);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			if (ndlp) {
+				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+			}
+			lpfc_release_scsi_buf_s4(phba, psb);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
+			return;
+		}
+	}
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
+			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
+			continue;
+		if (iocbq->sli4_xritag != xri)
+			continue;
+		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		psb->exch_busy = 0;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		if (pring->txq_cnt)
+			lpfc_worker_wake_up(phba);
+		return;
+
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the HBA by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, all scsi buffers are rejected.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_scsi_buf *psb;
+	int index, status, bcnt = 0, rcnt = 0, rc = 0;
+	LIST_HEAD(sblist);
+
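+	/*
+	 * Buffers are collected on a local list and posted in batches: once
+	 * LPFC_NEMBED_MBOX_SGL_CNT sgls have accumulated (or a hole in the
+	 * XRI array / the end of the array is reached), the batch is posted
+	 * with a single SGL block-post command and the count is reset for
+	 * the next round.
+	 */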
+	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
+		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
+		if (psb) {
+			/* Remove from SCSI buffer list */
+			list_del(&psb->list);
+			/* Add it to a local SCSI buffer list */
+			list_add_tail(&psb->list, &sblist);
+			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+				bcnt = rcnt;
+				rcnt = 0;
+			}
+		} else
+			/* A hole present in the XRI array, need to skip */
+			bcnt = rcnt;
+
+		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
+			/* End of XRI array for SCSI buffer, complete */
+			bcnt = rcnt;
+
+		/* Continue until collect up to a nembed page worth of sgls */
+		if (bcnt == 0)
+			continue;
+		/* Now, post the SCSI buffer list sgls as a block */
+		if (!phba->sli4_hba.extents_in_use)
+			status = lpfc_sli4_post_scsi_sgl_block(phba,
+							&sblist,
+							bcnt);
+		else
+			status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+							&sblist,
+							bcnt);
+		/* Reset SCSI buffer count for next round of posting */
+		bcnt = 0;
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+					 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->exch_busy = 1;
+				rc++;
+			} else {
+				psb->exch_busy = 0;
+				psb->status = IOSTAT_SUCCESS;
+			}
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+	return rc;
+}
+
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for a device with the SLI-4 interface
+ * spec. The scsi buffer contains all the necessary information needed to
+ * initiate a SCSI I/O.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *psb;
+	struct sli4_sge *sgl;
+	IOCB_t *iocb;
+	dma_addr_t pdma_phys_fcp_cmd;
+	dma_addr_t pdma_phys_fcp_rsp;
+	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+	uint16_t iotag, last_xritag = NO_XRI, lxri = 0;
+	int status = 0, index;
+	int bcnt;
+	int non_sequential_xri = 0;
+	LIST_HEAD(sblist);
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+		if (!psb)
+			break;
+
+		/*
+		 * Get memory from the pci pool to map the virt space to pci bus
+		 * space for an I/O.  The DMA buffer includes space for the
+		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+		 * necessary to support the sg_tablesize.
+		 */
+		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+						GFP_KERNEL, &psb->dma_handle);
+		if (!psb->data) {
+			kfree(psb);
+			break;
+		}
+
+		/* Initialize virtual ptrs to dma_buf region. */
+		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/* Allocate iotag for psb->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+		if (iotag == 0) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+				psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+			      psb->data, psb->dma_handle);
+			kfree(psb);
+			break;
+		}
+		psb->cur_iocbq.sli4_lxritag = lxri;
+		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		if (last_xritag != NO_XRI
+			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
+			non_sequential_xri = 1;
+		} else
+			list_add_tail(&psb->list, &sblist);
+		last_xritag = psb->cur_iocbq.sli4_xritag;
+
+		index = phba->sli4_hba.scsi_xri_cnt++;
+		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+		psb->fcp_bpl = psb->data;
+		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
+			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+					sizeof(struct fcp_cmnd));
+
+		/* Initialize local short-hand pointers. */
+		sgl = (struct sli4_sge *)psb->fcp_bpl;
+		pdma_phys_bpl = psb->dma_handle;
+		pdma_phys_fcp_cmd =
+			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
+			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+		/*
+		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
+		 * are sg list bdes.  Initialize the first two and leave the
+		 * rest for queuecommand.
+		 */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
+		sgl++;
+
+		/* Setup the physical region for the FCP RSP */
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
+
+		/*
+		 * Since the IOCB for the FCP I/O is built into this
+		 * lpfc_scsi_buf, initialize it with all known data now.
+		 */
+		iocb = &psb->cur_iocbq.iocb;
+		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+		/* setting the BLP size to 2 * sizeof BDE may not be correct.
+		 * We are setting the bpl to point to our sgl. An sgl's
+		 * entries are 16 bytes, a bpl's entries are 12 bytes.
+		 */
+		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+		iocb->ulpBdeCount = 1;
+		iocb->ulpLe = 1;
+		iocb->ulpClass = CLASS3;
+		psb->cur_iocbq.context1  = psb;
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		psb->dma_phys_bpl = pdma_phys_bpl;
+		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
+		if (non_sequential_xri) {
+			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
+						pdma_phys_bpl1,
+						psb->cur_iocbq.sli4_xritag);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->exch_busy = 1;
+			} else {
+				psb->exch_busy = 0;
+				psb->status = IOSTAT_SUCCESS;
+			}
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+			break;
+		}
+	}
+	if (bcnt) {
+		if (!phba->sli4_hba.extents_in_use)
+			status = lpfc_sli4_post_scsi_sgl_block(phba,
+								&sblist,
+								bcnt);
+		else
+			status = lpfc_sli4_post_scsi_sgl_blk_ext(phba,
+								&sblist,
+								bcnt);
+
+		if (status) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"3021 SCSI SGL post error %d\n",
+					status);
+			bcnt = 0;
+		}
+		/* Reset SCSI buffer count for next round of posting */
+		while (!list_empty(&sblist)) {
+			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+				 list);
+			if (status) {
+				/* Put this back on the abort scsi list */
+				psb->exch_busy = 1;
+			} else {
+				psb->exch_busy = 0;
+				psb->status = IOSTAT_SUCCESS;
+			}
+			/* Put it back into the SCSI buffer list */
+			lpfc_release_scsi_buf_s4(phba, psb);
+		}
+	}
+
+	return bcnt + non_sequential_xri;
+}
+
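+/*
+ * For reference: the SLI-4 DMA buffer layout is the reverse of the SLI-3
+ * layout above.  The SGL occupies the front of the allocation, and the FCP
+ * CMND and RSP are packed at the very end of the cfg_sg_dma_buf_size region:
+ *
+ *   psb->data                                    end of allocation
+ *   +---------------------------------+-----------+----------+
+ *   |  SGL (sge[0]=CMND, sge[1]=RSP,  | fcp_cmnd  | fcp_rsp  |
+ *   |  sge[2..]=data at queuecommand) |           |          |
+ *   +---------------------------------+-----------+----------+
+ */
+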
+/**
+ * lpfc_new_scsi_buf - Wrapper function for the scsi buffer allocator
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
+
+/**
+ * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	if (lpfc_cmd) {
+		lpfc_cmd->seg_cnt = 0;
+		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
+	}
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	return  lpfc_cmd;
+}
+
+/**
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: pointer to the node on which the I/O will be issued; buffers whose
+ *        XRI still has an active RRQ for this node are skipped.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_scsi_buf *lpfc_cmd;
+	unsigned long iflag = 0;
+	int found = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
+							list) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_cmd->cur_iocbq.sli4_xritag))
+			continue;
+		list_del(&lpfc_cmd->list);
+		found = 1;
+		lpfc_cmd->seg_cnt = 0;
+		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
+		break;
+	}
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	if (!found)
+		return NULL;
+	return lpfc_cmd;
+}
+
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of the @phba
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	return  phba->lpfc_get_scsi_buf(phba, ndlp);
+}
+
+/**
+ * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list.
+ **/
+static void
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	psb->pCmd = NULL;
+	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+}
+
+/**
+ * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list. For SLI4, XRIs are tied to the scsi buffer
+ * and cannot be reused for at least RA_TOV amount of time if the exchange
+ * was aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	unsigned long iflag = 0;
+
+	if (psb->exch_busy) {
+		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list,
+			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+					iflag);
+	} else {
+
+		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+		psb->pCmd = NULL;
+		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	}
+}
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases the @psb scsi buffer by adding it to the tail of the
+ * @phba lpfc_scsi_buf_list.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+
+	phba->lpfc_release_scsi_buf(phba, psb);
+}
+
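+/*
+ * The wrappers above dispatch through per-HBA function pointers so that the
+ * rest of the driver stays SLI-revision agnostic.  A minimal sketch of such a
+ * binding is shown below (illustration only, not compiled; the sketch
+ * function name is hypothetical and the real binding is performed elsewhere
+ * during HBA setup):
+ */
+#if 0
+static void lpfc_scsi_buf_ops_binding_sketch(struct lpfc_hba *phba)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+	} else {
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+	}
+}
+#endif
+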
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for the scatter-gather list of the
+ * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. It
+ * scans through the sg elements and formats the bdes. This routine also
+ * initializes all IOCB fields which are dependent on the scsi command request
+ * buffer.
+ *
+ * Return codes:
+ *   1 - Error
+ *   0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	int nseg, datadir = scsi_cmnd->sc_data_direction;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from dma_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+				  scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!nseg))
+			return 1;
+
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9064 BLKGRD: %s: Too many sg segments from "
+			       "dma_map_sg.  Config %d, seg_cnt %d\n",
+			       __func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command.  Just run through the seg_cnt and format
+		 * the bde's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			if (phba->sli_rev == 3 &&
+			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+				data_bde->addrLow = putPaddrLow(physaddr);
+				data_bde->addrHigh = putPaddrHigh(physaddr);
+				data_bde++;
+			} else {
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+				bpl->tus.f.bdeSize = sg_dma_len(sgel);
+				bpl->tus.w = le32_to_cpu(bpl->tus.w);
+				bpl->addrLow =
+					le32_to_cpu(putPaddrLow(physaddr));
+				bpl->addrHigh =
+					le32_to_cpu(putPaddrHigh(physaddr));
+				bpl++;
+			}
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized and for SLI-3 the extended bde count is
+	 * explicitly reinitialized since all iocb memory resources are reused.
+	 */
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+			/*
+			 * The extended IOCB format can only fit 3 BDE or a BPL.
+			 * This I/O has more than 3 BDE so the 1st data bde will
+			 * be a BPL that is filled in here.
+			 */
+			physaddr = lpfc_cmd->dma_handle;
+			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+			data_bde->tus.f.bdeSize = (num_bde *
+						   sizeof(struct ulp_bde64));
+			physaddr += (sizeof(struct fcp_cmnd) +
+				     sizeof(struct fcp_rsp) +
+				     (2 * sizeof(struct ulp_bde64)));
+			data_bde->addrHigh = putPaddrHigh(physaddr);
+			data_bde->addrLow = putPaddrLow(physaddr);
+			/* ebde count includes the response bde and data bpl */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+		} else {
+			/* ebde count includes the response bde and data bdes */
+			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+		}
+	} else {
+		iocb_cmd->un.fcpi64.bdl.bdeSize =
+			((num_bde + 2) * sizeof(struct ulp_bde64));
+		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+	}
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+	return 0;
+}
+
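+/*
+ * Worked example of the sizing above, using the driver's own note that one
+ * ulp_bde64 occupies 12 bytes: an I/O with two data segments on the BPL path
+ * gets bdl.bdeSize = (2 data BDEs + CMND BDE + RSP BDE) * 12 = 48 bytes,
+ * while the SLI-3 extended-IOCB path fits those two data BDEs directly and
+ * reports ebde_count = 2 + 1 (data BDEs plus the response BDE).
+ */
+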
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+	return sc->device->sector_size;
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* Return if error injection is detected by Initiator */
+#define BG_ERR_INIT	0x1
+/* Return if error injection is detected by Target */
+#define BG_ERR_TGT	0x2
+/* Return if swapping CSUM<-->CRC is required for error injection */
+#define BG_ERR_SWAP	0x10
+/* Return if disabling Guard/Ref/App checking is required for error injection */
+#define BG_ERR_CHECK	0x20
+
+/**
+ * lpfc_bg_err_inject - Determine if we should inject an error
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @reftag: (out) BlockGuard reference tag for transmitted data
+ * @apptag: (out) BlockGuard application tag for transmitted data
+ * @new_guard: (in) Value to replace CRC with if needed
+ *
+ * Returns BG_ERR_* bit mask or 0 if request ignored
+ **/
+static int
+lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
+{
+	struct scatterlist *sgpe; /* s/g prot entry */
+	struct scatterlist *sgde; /* s/g data entry */
+	struct lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct scsi_dif_tuple *src = NULL;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_rport_data *rdata;
+	uint32_t op = scsi_get_prot_op(sc);
+	uint32_t blksize;
+	uint32_t numblks;
+	sector_t lba;
+	int rc = 0;
+	int blockoff = 0;
+
+	if (op == SCSI_PROT_NORMAL)
+		return 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+	lba = scsi_get_lba(sc);
+
+	/* First check if we need to match the LBA */
+	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+		blksize = lpfc_cmd_blksize(sc);
+		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
+
+		/* Make sure we have the right LBA if one is specified */
+		if ((phba->lpfc_injerr_lba < lba) ||
+			(phba->lpfc_injerr_lba >= (lba + numblks)))
+			return 0;
+		if (sgpe) {
+			blockoff = phba->lpfc_injerr_lba - lba;
+			numblks = sg_dma_len(sgpe) /
+				sizeof(struct scsi_dif_tuple);
+			if (numblks < blockoff)
+				blockoff = numblks;
+		}
+	}
+
+	/* Next check if we need to match the remote NPortID or WWPN */
+	rdata = sc->device->hostdata;
+	if (rdata && rdata->pnode) {
+		ndlp = rdata->pnode;
+
+		/* Make sure we have the right NPortID if one is specified */
+		if (phba->lpfc_injerr_nportid  &&
+			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
+			return 0;
+
+		/*
+		 * Make sure we have the right WWPN if one is specified.
+		 * wwn[0] should be a non-zero NAA in a good WWPN.
+		 */
+		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
+			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
+				sizeof(struct lpfc_name)) != 0))
+			return 0;
+	}
+
+	/* Setup a ptr to the protection data if the SCSI host provides it */
+	if (sgpe) {
+		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+		src += blockoff;
+		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
+	}
+
+	/* Should we change the Reference Tag */
+	if (reftag) {
+		if (phba->lpfc_injerr_wref_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9076 BLKGRD: Injecting reftag error: "
+					"write lba x%lx + x%x oldrefTag x%x\n",
+					(unsigned long)lba, blockoff,
+					be32_to_cpu(src->ref_tag));
+
+					/*
+					 * Save the old ref_tag so we can
+					 * restore it on completion.
+					 */
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_REFTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->ref_tag;
+					}
+					src->ref_tag = cpu_to_be32(0xDEADBEEF);
+					phba->lpfc_injerr_wref_cnt--;
+					if (phba->lpfc_injerr_wref_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the error
+				 * to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				/* DEADBEEF will be the reftag on the wire */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+					LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9078 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_wref_cnt--;
+				if (phba->lpfc_injerr_wref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9077 BLKGRD: Injecting reftag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rref_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				*reftag = 0xDEADBEEF;
+				phba->lpfc_injerr_rref_cnt--;
+				if (phba->lpfc_injerr_rref_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9079 BLKGRD: Injecting reftag error: "
+					"read lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+	}
+
+	/* Should we change the Application Tag */
+	if (apptag) {
+		if (phba->lpfc_injerr_wapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				if (src) {
+					/*
+					 * For WRITE_PASS, force the error
+					 * to be sent on the wire. It should
+					 * be detected by the Target.
+					 * If blockoff != 0 error will be
+					 * inserted in middle of the IO.
+					 */
+
+					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9080 BLKGRD: Injecting apptag error: "
+					"write lba x%lx + x%x oldappTag x%x\n",
+					(unsigned long)lba, blockoff,
+					be16_to_cpu(src->app_tag));
+
+					/*
+					 * Save the old app_tag so we can
+					 * restore it on completion.
+					 */
+					if (lpfc_cmd) {
+						lpfc_cmd->prot_data_type =
+							LPFC_INJERR_APPTAG;
+						lpfc_cmd->prot_data_segment =
+							src;
+						lpfc_cmd->prot_data =
+							src->app_tag;
+					}
+					src->app_tag = cpu_to_be16(0xDEAD);
+					phba->lpfc_injerr_wapp_cnt--;
+					if (phba->lpfc_injerr_wapp_cnt == 0) {
+						phba->lpfc_injerr_nportid = 0;
+						phba->lpfc_injerr_lba =
+							LPFC_INJERR_LBA_OFF;
+						memset(&phba->lpfc_injerr_wwpn,
+						  0, sizeof(struct lpfc_name));
+					}
+					rc = BG_ERR_TGT | BG_ERR_CHECK;
+					break;
+				}
+				/* Drop thru */
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				/* DEAD will be the apptag on the wire */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_TGT | BG_ERR_CHECK;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0813 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_wapp_cnt--;
+				if (phba->lpfc_injerr_wapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0812 BLKGRD: Injecting apptag error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rapp_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				*apptag = 0xDEAD;
+				phba->lpfc_injerr_rapp_cnt--;
+				if (phba->lpfc_injerr_rapp_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+				rc = BG_ERR_INIT;
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0814 BLKGRD: Injecting apptag error: "
+					"read lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+	}
+
+	/* Should we change the Guard Tag */
+	if (new_guard) {
+		if (phba->lpfc_injerr_wgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_WRITE_PASS:
+				rc = BG_ERR_CHECK;
+				/* Drop thru */
+
+			case SCSI_PROT_WRITE_INSERT:
+				/*
+				 * For WRITE_INSERT, force the
+				 * error to be sent on the wire. It should be
+				 * detected by the Target.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc |= BG_ERR_TGT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0817 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			case SCSI_PROT_WRITE_STRIP:
+				/*
+				 * For WRITE_STRIP and WRITE_PASS,
+				 * force the error on data
+				 * being copied from SLI-Host to SLI-Port.
+				 */
+				phba->lpfc_injerr_wgrd_cnt--;
+				if (phba->lpfc_injerr_wgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0816 BLKGRD: Injecting guard error: "
+					"write lba x%lx\n", (unsigned long)lba);
+				break;
+			}
+		}
+		if (phba->lpfc_injerr_rgrd_cnt) {
+			switch (op) {
+			case SCSI_PROT_READ_INSERT:
+			case SCSI_PROT_READ_STRIP:
+			case SCSI_PROT_READ_PASS:
+				/*
+				 * For READ_STRIP and READ_PASS, force the
+				 * error on data being read off the wire. It
+				 * should force an IO error to the driver.
+				 */
+				phba->lpfc_injerr_rgrd_cnt--;
+				if (phba->lpfc_injerr_rgrd_cnt == 0) {
+					phba->lpfc_injerr_nportid = 0;
+					phba->lpfc_injerr_lba =
+						LPFC_INJERR_LBA_OFF;
+					memset(&phba->lpfc_injerr_wwpn,
+						0, sizeof(struct lpfc_name));
+				}
+
+				rc = BG_ERR_INIT | BG_ERR_SWAP;
+				/* Signals the caller to swap CRC->CSUM */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0818 BLKGRD: Injecting guard error: "
+					"read lba x%lx\n", (unsigned long)lba);
+			}
+		}
+	}
+
+	return rc;
+}
+#endif
+
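+/*
+ * Sketch of how callers consume the lpfc_bg_err_inject() return mask
+ * (illustration only, not compiled; the wrapper name and locals are
+ * stand-ins, the pattern mirrors the lpfc_bg_setup_* routines below):
+ */
+#if 0
+static void lpfc_bg_err_inject_usage_sketch(struct lpfc_hba *phba,
+					    struct scsi_cmnd *sc)
+{
+	uint32_t reftag = (uint32_t)scsi_get_lba(sc);	/* truncated LBA */
+	uint8_t txop = 0, rxop = 0;
+	uint32_t checking = 1;
+	int rc;
+
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc & BG_ERR_SWAP)		/* swap CSUM<->CRC opcodes */
+		lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+	if (rc & BG_ERR_CHECK)		/* disable guard/ref/app checking */
+		checking = 0;
+}
+#endif
+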
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
+ * the specified SCSI command.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret = 0;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CRC;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
+					scsi_get_prot_op(sc));
+			ret = 1;
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CRC_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
+					scsi_get_prot_op(sc));
+			ret = 1;
+			break;
+		}
+	}
+
+	return ret;
+}
+
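+/*
+ * Summary of the mapping above (BG_OP_ prefix omitted), taken directly from
+ * the switch statements:
+ *
+ *   prot op                    guard   rxop               txop
+ *   READ_INSERT/WRITE_STRIP    IP      IN_NODIF_OUT_CSUM  IN_CSUM_OUT_NODIF
+ *   READ_STRIP/WRITE_INSERT    IP      IN_CRC_OUT_NODIF   IN_NODIF_OUT_CRC
+ *   READ_PASS/WRITE_PASS       IP      IN_CRC_OUT_CSUM    IN_CSUM_OUT_CRC
+ *   READ_INSERT/WRITE_STRIP    CRC     IN_NODIF_OUT_CRC   IN_CRC_OUT_NODIF
+ *   READ_STRIP/WRITE_INSERT    CRC     IN_CRC_OUT_NODIF   IN_NODIF_OUT_CRC
+ *   READ_PASS/WRITE_PASS       CRC     IN_CRC_OUT_CRC     IN_CRC_OUT_CRC
+ */
+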
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
+ * the specified SCSI command in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret = 0;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CSUM_OUT_CRC;
+			*txop = BG_OP_IN_CRC_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+#endif
+
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIF from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |          PDE_5          |
+ *                                +-------------------------+
+ *                                |          PDE_6          |
+ *                                +-------------------------+
+ *                                |         Data BDE        |
+ *                                +-------------------------+
+ *                                |more Data BDE's ... (opt)|
+ *                                +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct lpfc_pde5 *pde5 = NULL;
+	struct lpfc_pde6 *pde6 = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_bde = 0, status;
+	int datadir = sc->sc_data_direction;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t reftag;
+	unsigned blksize;
+	uint8_t txop, rxop;
+
+	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command for pde*/
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup PDE5 with what we have */
+	pde5 = (struct lpfc_pde5 *) bpl;
+	memset(pde5, 0, sizeof(struct lpfc_pde5));
+	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+	/* Endianness conversion if necessary for PDE5 */
+	pde5->word0 = cpu_to_le32(pde5->word0);
+	pde5->reftag = cpu_to_le32(reftag);
+
+	/* advance bpl and increment bde count */
+	num_bde++;
+	bpl++;
+	pde6 = (struct lpfc_pde6 *) bpl;
+
+	/* setup PDE6 with the rest of the info */
+	memset(pde6, 0, sizeof(struct lpfc_pde6));
+	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+	bf_set(pde6_optx, pde6, txop);
+	bf_set(pde6_oprx, pde6, rxop);
+	if (datadir == DMA_FROM_DEVICE) {
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
+	}
+	bf_set(pde6_ai, pde6, 1);
+	bf_set(pde6_ae, pde6, 0);
+	bf_set(pde6_apptagval, pde6, 0);
+
+	/* Endianness conversion if necessary for PDE6 */
+	pde6->word0 = cpu_to_le32(pde6->word0);
+	pde6->word1 = cpu_to_le32(pde6->word1);
+	pde6->word2 = cpu_to_le32(pde6->word2);
+
+	/* advance bpl and increment bde count */
+	num_bde++;
+	bpl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		bpl->tus.f.bdeSize = sg_dma_len(sgde);
+		if (datadir == DMA_TO_DEVICE)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+		num_bde++;
+	}
+
+out:
+	return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream.  For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |          PDE_5          |
+ *                                    +-------------------------+
+ *                                    |          PDE_6          |
+ *                                    +-------------------------+
+ *                                    |      PDE_7 (Prot BDE)   |
+ *                                    +-------------------------+
+ *                                    |        Data BDE         |
+ *                                    +-------------------------+
+ *                                    |more Data BDE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |          PDE_5          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct lpfc_pde5 *pde5 = NULL;
+	struct lpfc_pde6 *pde6 = NULL;
+	struct lpfc_pde7 *pde7 = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	int datadir = sc->sc_data_direction;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	int num_bde = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
+		/* setup PDE5 with what we have */
+		pde5 = (struct lpfc_pde5 *) bpl;
+		memset(pde5, 0, sizeof(struct lpfc_pde5));
+		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
+
+		/* Endianness conversion if necessary for PDE5 */
+		pde5->word0 = cpu_to_le32(pde5->word0);
+		pde5->reftag = cpu_to_le32(reftag);
+
+		/* advance bpl and increment bde count */
+		num_bde++;
+		bpl++;
+		pde6 = (struct lpfc_pde6 *) bpl;
+
+		/* setup PDE6 with the rest of the info */
+		memset(pde6, 0, sizeof(struct lpfc_pde6));
+		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
+		bf_set(pde6_optx, pde6, txop);
+		bf_set(pde6_oprx, pde6, rxop);
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
+		bf_set(pde6_ai, pde6, 1);
+		bf_set(pde6_ae, pde6, 0);
+		bf_set(pde6_apptagval, pde6, 0);
+
+		/* Endianness conversion if necessary for PDE6 */
+		pde6->word0 = cpu_to_le32(pde6->word0);
+		pde6->word1 = cpu_to_le32(pde6->word1);
+		pde6->word2 = cpu_to_le32(pde6->word2);
+
+		/* advance bpl and increment bde count */
+		num_bde++;
+		bpl++;
+
+		/* setup the first BDE that points to protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be integer multiple of the DIF block length */
+		BUG_ON(protgroup_len % 8);
+
+		pde7 = (struct lpfc_pde7 *) bpl;
+		memset(pde7, 0, sizeof(struct lpfc_pde7));
+		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
+
+		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		/* check if this pde is crossing the 4K boundary; if so split */
+		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
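+		/*
+		 * Worked example: with 512-byte blocks, a protection buffer
+		 * whose DMA address falls at offset 0xf80 within a 4K page
+		 * and which holds 512 bytes of DIF (64 blocks) crosses the
+		 * boundary (0xf80 + 512 > 0x1000).  The remainder is then
+		 * 0x1000 - 0xf80 = 128 bytes, so this PDE_7 covers 16 blocks
+		 * (8K of data), and the loop continues with
+		 * protgroup_offset = 128 to emit another PDE_5/6/7 group for
+		 * the remaining 48 blocks.
+		 */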
+
+		num_bde++;
+
+		/* setup BDE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9065 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			bpl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				bpl->tus.f.bdeSize = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+				split_offset += bpl->tus.f.bdeSize;
+			}
+
+			subtotal += bpl->tus.f.bdeSize;
+
+			if (datadir == DMA_TO_DEVICE)
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			else
+				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+			bpl->tus.w = le32_to_cpu(bpl->tus.w);
+
+			num_bde++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			bpl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			bpl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9054 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+out:
+
+	return num_bde;
+}
+
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIF from
+ * the incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *                                +-------------------------+
+ *   start of prot group  -->     |         DI_SEED         |
+ *                                +-------------------------+
+ *                                |         Data SGE        |
+ *                                +-------------------------+
+ *                                |more Data SGE's ... (opt)|
+ *                                +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_sge = 0, status;
+	int datadir = sc->sc_data_direction;
+	uint32_t reftag;
+	unsigned blksize;
+	uint8_t txop, rxop;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+
+	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command for pde*/
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup DISEED with what we have */
+	diseed = (struct sli4_sge_diseed *) sgl;
+	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+	/* Endianness conversion if necessary */
+	diseed->ref_tag = cpu_to_le32(reftag);
+	diseed->ref_tag_tran = diseed->ref_tag;
+
+	/* setup DISEED with the rest of the info */
+	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+	if (datadir == DMA_FROM_DEVICE) {
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+	}
+	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+	/* Endianness conversion if necessary for DISEED */
+	diseed->word2 = cpu_to_le32(diseed->word2);
+	diseed->word3 = cpu_to_le32(diseed->word3);
+
+	/* advance bpl and increment sge count */
+	num_sge++;
+	sgl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		dma_len = sg_dma_len(sgde);
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+		if ((i + 1) == datasegcnt)
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+		else
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+		sgl->sge_len = cpu_to_le32(dma_len);
+		dma_offset += dma_len;
+
+		sgl++;
+		num_sge++;
+	}
+
+out:
+	return num_sge;
+}
+
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream.  For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *                                    +-------------------------+
+ *   start of first prot group  -->   |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |      DIF (Prot SGE)     |
+ *                                    +-------------------------+
+ *                                    |        Data SGE         |
+ *                                    +-------------------------+
+ *                                    |more Data SGE's ... (opt)|
+ *                                    +-------------------------+
+ *   start of new  prot group  -->    |         DISEED          |
+ *                                    +-------------------------+
+ *                                    |          ...            |
+ *                                    +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	uint32_t dma_len;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_offset = 0;
+	int num_sge = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
+	if (rc) {
+		if (rc & BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc & BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
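+		/*
+		 * The DISEED SGE (DIF "seed") carries the starting reference
+		 * tag and the transmit/receive DIF opcodes that apply to the
+		 * protection group which follows it.
+		 */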
+		/* setup DISEED with what we have */
+		diseed = (struct sli4_sge_diseed *) sgl;
+		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+		/* Endianness conversion if necessary */
+		diseed->ref_tag = cpu_to_le32(reftag);
+		diseed->ref_tag_tran = diseed->ref_tag;
+
+		/* setup DISEED with the rest of the info */
+		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+		/* Endianness conversion if necessary for DISEED */
+		diseed->word2 = cpu_to_le32(diseed->word2);
+		diseed->word3 = cpu_to_le32(diseed->word3);
+
+		/* advance sgl and increment sge count */
+		num_sge++;
+		sgl++;
+
+		/* setup the first SGE that points to the protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be integer multiple of the DIF block length */
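+		/* (a T10 DIF tuple - guard, app tag, ref tag - is 8 bytes) */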
+		BUG_ON(protgroup_len % 8);
+
+		/* Now setup DIF SGE */
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		/* check if DIF SGE is crossing the 4K boundary; if so split */
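+		/*
+		 * If it does, only the part up to the boundary is consumed
+		 * now; protgroup_offset remembers how far into this prot s/g
+		 * entry we got so the next pass resumes from there.
+		 */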
+		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
+
+		num_sge++;
+
+		/* setup SGE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9086 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			sgl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				dma_len = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				dma_len = protgrp_bytes - subtotal;
+				split_offset += dma_len;
+			}
+
+			subtotal += dma_len;
+
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+
+			num_sge++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			sgl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			sgl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9085 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+
+	return num_sge;
+}
+
+/**
+ * lpfc_prot_group_type - Get protection group type of SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
+ * Given a SCSI command that supports DIF, determine composition of protection
+ * groups involved in setting up buffer lists
+ *
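+ * LPFC_PG_TYPE_NO_DIF means only data buffers are presented to the HBA
+ * (the adapter inserts or strips the DIF on the wire); LPFC_PG_TYPE_DIF_BUF
+ * means separate protection buffers accompany the data buffers.
+ *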
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+	int ret = LPFC_PG_TYPE_INVALID;
+	unsigned char op = scsi_get_prot_op(sc);
+
+	switch (op) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		ret = LPFC_PG_TYPE_NO_DIF;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		ret = LPFC_PG_TYPE_DIF_BUF;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9021 Unsupported protection op:%d\n", op);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
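+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success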
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	/*
+	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
+	 *  fcp_rsp regions to the first data bde entry
+	 */
+	bpl += 2;
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9067 BLKGRD: %s: Too many sg segments"
+					" from dma_map_sg.  Config %d, seg_cnt"
+					" %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9068 BLKGRD: %s: Too many prot sg "
+					"segments from dma_map_sg.  Config %d,"
+						"prot_seg_cnt %d\n", __func__,
+						phba->cfg_prot_sg_seg_cnt,
+						lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+					     scsi_prot_sglist(scsi_cmnd),
+					     scsi_prot_sg_count(scsi_cmnd),
+					     datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9022 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
+	 * reinitialized since all iocb memory resources are used many times
+	 * for transmit, receive, and continuation bpl's.
+	 */
+	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
+	iocb_cmd->ulpBdeCount = 1;
+	iocb_cmd->ulpLe = 1;
+
+	fcpdl = scsi_bufflen(scsi_cmnd);
+
+	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
+		/*
+		 * We are in DIF Type 1 mode
+		 * Every data block has an 8 byte DIF (trailer)
+		 * attached to it.  Must adjust FCP data length
+		 */
+		blksize = lpfc_cmd_blksize(scsi_cmnd);
+		diflen = (fcpdl / blksize) * 8;
+		fcpdl += diflen;
+	}
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
+	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9023 Could not setup all needed BDE's "
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA.  In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ *  0 - No error found
+ *  1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
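+ *
+ * The sense data built here uses ILLEGAL REQUEST with ASC 0x10 and
+ * ASCQ 0x1/0x2/0x3, the SBC-defined additional sense codes for guard,
+ * application tag and reference tag check failures respectively.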
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+	int ret = 0;
+	uint32_t bghm = bgf->bghm;
+	uint32_t bgstat = bgf->bgstat;
+	uint64_t failing_sector = 0;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
+			" 0x%x lba 0x%llx blk cnt 0x%x "
+			"bgstat=0x%x bghm=0x%x\n",
+			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
+			blk_rq_sectors(cmd->request), bgstat, bghm);
+
+	spin_lock(&_dump_buf_lock);
+	if (!_dump_buf_done) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
+			" Data for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+		lpfc_debug_save_data(phba, cmd);
+
+		/* If we have a prot sgl, save the DIF buffer */
+		if (lpfc_prot_group_type(phba, cmd) ==
+				LPFC_PG_TYPE_DIF_BUF) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
+				"Saving DIF for %u blocks to debugfs\n",
+				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+			lpfc_debug_save_dif(phba, cmd);
+		}
+
+		_dump_buf_done = 1;
+	}
+	spin_unlock(&_dump_buf_lock);
+
+	if (lpfc_bgs_get_invalid_prof(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
+			" BlockGuard profile. bgstat:0x%x\n",
+			bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
+				"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+				bgstat);
+		ret = (-1);
+		goto out;
+	}
+
+	if (lpfc_bgs_get_guard_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x1);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+		phba->bg_guard_err_cnt++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9055 BLKGRD: guard_tag error\n");
+	}
+
+	if (lpfc_bgs_get_reftag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x3);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_reftag_err_cnt++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9056 BLKGRD: ref_tag error\n");
+	}
+
+	if (lpfc_bgs_get_apptag_err(bgstat)) {
+		ret = 1;
+
+		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+				0x10, 0x2);
+		cmd->result = DRIVER_SENSE << 24
+			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+		phba->bg_apptag_err_cnt++;
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9061 BLKGRD: app_tag error\n");
+	}
+
+	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+		/*
+		 * setup sense data descriptor 0 per SPC-4 as an information
+		 * field, and put the failing LBA in it.
+		 * This code assumes there was also a guard/app/ref tag error
+		 * indication.
+		 */
+		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
+		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
+		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
+		cmd->sense_buffer[10] = 0x80; /* Validity bit */
+
+		/* bghm is an "on the wire" FC frame based count */
+		switch (scsi_get_prot_op(cmd)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			bghm /= cmd->device->sector_size;
+			break;
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			bghm /= (cmd->device->sector_size +
+				sizeof(struct scsi_dif_tuple));
+			break;
+		}
+
+		failing_sector = scsi_get_lba(cmd);
+		failing_sector += bghm;
+
+		/* Descriptor Information */
+		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
+	}
+
+	if (!ret) {
+		/* No error was reported - problem in FW? */
+		cmd->result = ScsiResult(DID_ERROR, 0);
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"9057 BLKGRD: Unknown error reported!\n");
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct scatterlist *sgel = NULL;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+	struct sli4_sge *first_data_sgl;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	dma_addr_t physaddr;
+	uint32_t num_bde = 0;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+	int nseg;
+	struct ulp_bde64 *bde;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+
+		nseg = scsi_dma_map(scsi_cmnd);
+		if (unlikely(!nseg))
+			return 1;
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+		sgl += 1;
+		first_data_sgl = sgl;
+		lpfc_cmd->seg_cnt = nseg;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
+				" %s: Too many sg segments from "
+				"dma_map_sg.  Config %d, seg_cnt %d\n",
+				__func__, phba->cfg_sg_seg_cnt,
+			       lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		/*
+		 * The driver established a maximum scatter-gather segment count
+		 * during probe that limits the number of sg elements in any
+		 * single scsi command.  Just run through the seg_cnt and format
+		 * the sge's.
+		 * When using SLI-3 the driver will try to fit all the BDEs into
+		 * the IOCB. If it can't then the BDEs get added to a BPL as it
+		 * does for SLI-2 mode.
+		 */
+		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+			physaddr = sg_dma_address(sgel);
+			dma_len = sg_dma_len(sgel);
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((num_bde + 1) == nseg)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+			sgl++;
+		}
+		/* setup the performance hint (first data BDE) if enabled */
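+		/*
+		 * The first data SGE is mirrored as an inline BDE in IOCB
+		 * words 5-7, presumably so the adapter can begin the first
+		 * data transfer without first fetching the external SGL.
+		 */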
+		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+			bde = (struct ulp_bde64 *)
+					&(iocb_cmd->unsli3.sli3Words[5]);
+			bde->addrLow = first_data_sgl->addr_lo;
+			bde->addrHigh = first_data_sgl->addr_hi;
+			bde->tus.f.bdeSize =
+					le32_to_cpu(first_data_sgl->sge_len);
+			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+			bde->tus.w = cpu_to_le32(bde->tus.w);
+		}
+	} else {
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 1);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+	}
+
+	/*
+	 * Finish initializing those IOCB fields that are dependent on the
+	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+	 * explicitly reinitialized since all iocb memory resources are
+	 * reused.
+	 */
+	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+	return 0;
+}
+
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
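+ * For example (a sketch assuming 512-byte logical blocks): a 4096-byte
+ * transfer that carries protection data on the wire is reported as
+ * 4096 + (4096 / 512) * 8 = 4160 bytes, since each block gains an
+ * 8-byte DIF tuple.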
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	fcpdl = scsi_bufflen(sc);
+
+	/* Check if there is protection data on the wire */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		/* Read */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+			return fcpdl;
+
+	} else {
+		/* Write */
+		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+			return fcpdl;
+	}
+
+	/* If protection data on the wire, adjust the count accordingly */
+	blksize = lpfc_cmd_blksize(sc);
+	diflen = (fcpdl / blksize) * 8;
+	fcpdl += diflen;
+	return fcpdl;
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
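+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success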
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int fcpdl;
+
+	/*
+	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
+	 *  fcp_rsp regions to the first data bde entry
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the use_sg
+		 * pages.  They are not guaranteed to be the same for those
+		 * architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_sglist(scsi_cmnd),
+					scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		sgl += 1;
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9087 BLKGRD: %s: Too many sg segments"
+					" from dma_map_sg.  Config %d, seg_cnt"
+					" %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9088 BLKGRD: %s: Too many prot sg "
+					"segments from dma_map_sg.  Config %d,"
+						"prot_seg_cnt %d\n", __func__,
+						phba->cfg_prot_sg_seg_cnt,
+						lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+					     scsi_prot_sglist(scsi_cmnd),
+					     scsi_prot_sg_count(scsi_cmnd),
+					     datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9083 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+	lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
+
+	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9084 Could not setup all needed BDE's "
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when a SCSI command reports an error
+ * from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	struct lpfc_fast_path_event *fast_path_evt = NULL;
+	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+	unsigned long flags;
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
+	/* If there is a queue full or busy condition, send a scsi event */
+	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+		(cmnd->result == SAM_STAT_BUSY)) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.scsi_evt.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.scsi_evt.subcategory =
+		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+			FC_REG_SCSI_EVENT;
+		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+			LPFC_EVENT_CHECK_COND;
+		fast_path_evt->un.check_cond_evt.scsi_event.lun =
+			cmnd->device->lun;
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.check_cond_evt.sense_key =
+			cmnd->sense_buffer[2] & 0xf;
+		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+		     fcpi_parm &&
+		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+			((scsi_status == SAM_STAT_GOOD) &&
+			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
+		/*
+		 * If status is good or the resid does not match fcpi_parm, and
+		 * there is a valid fcpi_parm, then there is a read_check error
+		 */
+		fast_path_evt = lpfc_alloc_fast_evt(phba);
+		if (!fast_path_evt)
+			return;
+		fast_path_evt->un.read_check_error.header.event_type =
+			FC_REG_FABRIC_EVENT;
+		fast_path_evt->un.read_check_error.header.subcategory =
+			LPFC_EVENT_FCPRDCHKERR;
+		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+			&pnode->nlp_portname, sizeof(struct lpfc_name));
+		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+			&pnode->nlp_nodename, sizeof(struct lpfc_name));
+		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+		fast_path_evt->un.read_check_error.fcpiparam =
+			fcpi_parm;
+	} else
+		return;
+
+	fast_path_evt->vport = vport;
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	lpfc_worker_wake_up(phba);
+	return;
+}
+
+/**
+ * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
+ * @phba: The HBA for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the scsi
+ * command field of @psb for a device with the SLI-3 interface spec.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+	/*
+	 * There are only two special cases to consider.  (1) the scsi command
+	 * requested scatter-gather usage or (2) the scsi command allocated
+	 * a request buffer, but did not request use_sg.  There is a third
+	 * case, but it does not require resource deallocation.
+	 */
+	if (psb->seg_cnt > 0)
+		scsi_dma_unmap(psb->pCmd);
+	if (psb->prot_seg_cnt > 0)
+		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+				scsi_prot_sg_count(psb->pCmd),
+				psb->pCmd->sc_data_direction);
+}
+
+/**
+ * lpfc_handle_fcp_err - FCP response handler
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
+ * based upon SCSI and FCP error.
+ **/
+static void
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_iocbq *rsp_iocb)
+{
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
+	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+	uint32_t resp_info = fcprsp->rspStatus2;
+	uint32_t scsi_status = fcprsp->rspStatus3;
+	uint32_t *lp;
+	uint32_t host_status = DID_OK;
+	uint32_t rsplen = 0;
+	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
+
+	/*
+	 *  If this is a task management command, there is no
+	 *  scsi packet associated with this lpfc_cmd.  The driver
+	 *  consumes it.
+	 */
+	if (fcpcmd->fcpCntl2) {
+		scsi_status = 0;
+		goto out;
+	}
+
+	if (resp_info & RSP_LEN_VALID) {
+		rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2719 Invalid response length: "
+				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 rsplen);
+			host_status = DID_ERROR;
+			goto out;
+		}
+		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2757 Protocol failure detected during "
+				 "processing of FCP I/O op: "
+				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 fcprsp->rspInfo3);
+			host_status = DID_ERROR;
+			goto out;
+		}
+	}
+
+	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+		if (snslen > SCSI_SENSE_BUFFERSIZE)
+			snslen = SCSI_SENSE_BUFFERSIZE;
+
+		if (resp_info & RSP_LEN_VALID)
+			rsplen = be32_to_cpu(fcprsp->rspRspLen);
+		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+	}
+	lp = (uint32_t *)cmnd->sense_buffer;
+
+	if (!scsi_status && (resp_info & RESID_UNDER) &&
+		vport->cfg_log_verbose & LOG_FCP_UNDER)
+		logit = LOG_FCP_UNDER;
+
+	lpfc_printf_vlog(vport, KERN_WARNING, logit,
+			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 cmnd->cmnd[0], scsi_status,
+			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
+			 be32_to_cpu(fcprsp->rspResId),
+			 be32_to_cpu(fcprsp->rspSnsLen),
+			 be32_to_cpu(fcprsp->rspRspLen),
+			 fcprsp->rspInfo3);
+
+	scsi_set_resid(cmnd, 0);
+	if (resp_info & RESID_UNDER) {
+		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
+				 "9025 FCP Read Underrun, expected %d, "
+				 "residual %d Data: x%x x%x x%x\n",
+				 be32_to_cpu(fcpcmd->fcpDl),
+				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+				 cmnd->underflow);
+
+		/*
+		 * If there is an under run check if under run reported by
+		 * storage array is same as the under run reported by HBA.
+		 * If this is not same, there is a dropped frame.
+		 */
+		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+			fcpi_parm &&
+			(scsi_get_resid(cmnd) != fcpi_parm)) {
+			lpfc_printf_vlog(vport, KERN_WARNING,
+					 LOG_FCP | LOG_FCP_ERROR,
+					 "9026 FCP Read Check Error "
+					 "and Underrun Data: x%x x%x x%x x%x\n",
+					 be32_to_cpu(fcpcmd->fcpDl),
+					 scsi_get_resid(cmnd), fcpi_parm,
+					 cmnd->cmnd[0]);
+			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+			host_status = DID_ERROR;
+		}
+		/*
+		 * The cmnd->underflow is the minimum number of bytes that must
+		 * be transferred for this command.  Provided a sense condition
+		 * is not present, make sure the actual amount transferred is at
+		 * least the underflow value or fail.
+		 */
+		if (!(resp_info & SNS_LEN_VALID) &&
+		    (scsi_status == SAM_STAT_GOOD) &&
+		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+		     < cmnd->underflow)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+					 "9027 FCP command x%x residual "
+					 "underrun converted to error "
+					 "Data: x%x x%x x%x\n",
+					 cmnd->cmnd[0], scsi_bufflen(cmnd),
+					 scsi_get_resid(cmnd), cmnd->underflow);
+			host_status = DID_ERROR;
+		}
+	} else if (resp_info & RESID_OVER) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "9028 FCP command x%x residual overrun error. "
+				 "Data: x%x x%x\n", cmnd->cmnd[0],
+				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+		host_status = DID_ERROR;
+
+	/*
+	 * Check SLI validation that all the transfer was actually done
+	 * (fcpi_parm should be zero). Apply check only to reads.
+	 */
+	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
+				 "9029 FCP Read Check Error Data: "
+				 "x%x x%x x%x x%x x%x\n",
+				 be32_to_cpu(fcpcmd->fcpDl),
+				 be32_to_cpu(fcprsp->rspResId),
+				 fcpi_parm, cmnd->cmnd[0], scsi_status);
+		switch (scsi_status) {
+		case SAM_STAT_GOOD:
+		case SAM_STAT_CHECK_CONDITION:
+			/* Fabric dropped a data frame. Fail any successful
+			 * command in which we detected dropped frames.
+			 * A status of good or some check conditions could
+			 * be considered a successful command.
+			 */
+			host_status = DID_ERROR;
+			break;
+		}
+		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
+	}
+
+ out:
+	cmnd->result = ScsiResult(host_status, scsi_status);
+	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
+}
+
+/**
+ * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns scsi command result by looking into response IOCB
+ * status field appropriately. This routine handles QUEUE FULL condition as
+ * well by ramping down device queue depth.
+ **/
+static void
+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+			struct lpfc_iocbq *pIocbOut)
+{
+	struct lpfc_scsi_buf *lpfc_cmd =
+		(struct lpfc_scsi_buf *) pIocbIn->context1;
+	struct lpfc_vport      *vport = pIocbIn->vport;
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct scsi_cmnd *cmd;
+	int result;
+	struct scsi_device *tmp_sdev;
+	int depth;
+	unsigned long flags;
+	struct lpfc_fast_path_event *fast_path_evt;
+	struct Scsi_Host *shost;
+	uint32_t queue_depth, scsi_id;
+	uint32_t logit = LOG_FCP;
+
+	/* Sanity check on return of outstanding command */
+	if (!(lpfc_cmd->pCmd))
+		return;
+	cmd = lpfc_cmd->pCmd;
+	shost = cmd->device->host;
+
+	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+	/* pick up SLI4 exchange busy status from HBA */
+	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	if (lpfc_cmd->prot_data_type) {
+		struct scsi_dif_tuple *src = NULL;
+
+		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
+		/*
+		 * Used to restore any changes to protection
+		 * data for error injection.
+		 */
+		switch (lpfc_cmd->prot_data_type) {
+		case LPFC_INJERR_REFTAG:
+			src->ref_tag =
+				lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_APPTAG:
+			src->app_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		case LPFC_INJERR_GUARD:
+			src->guard_tag =
+				(uint16_t)lpfc_cmd->prot_data;
+			break;
+		default:
+			break;
+		}
+
+		lpfc_cmd->prot_data = 0;
+		lpfc_cmd->prot_data_type = 0;
+		lpfc_cmd->prot_data_segment = NULL;
+	}
+#endif
+	if (pnode && NLP_CHK_NODE_ACT(pnode))
+		atomic_dec(&pnode->cmd_pending);
+
+	if (lpfc_cmd->status) {
+		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+		    (lpfc_cmd->result & IOERR_DRVR_MASK))
+			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+		else if (lpfc_cmd->status >= IOSTAT_CNT)
+			lpfc_cmd->status = IOSTAT_DEFAULT;
+		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
+			&& !lpfc_cmd->fcp_rsp->rspStatus3
+			&& (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
+			&& !(phba->cfg_log_verbose & LOG_FCP_UNDER))
+			logit = 0;
+		else
+			logit = LOG_FCP | LOG_FCP_UNDER;
+		lpfc_printf_vlog(vport, KERN_WARNING, logit,
+			 "9030 FCP cmd x%x failed <%d/%d> "
+			 "status: x%x result: x%x Data: x%x x%x\n",
+			 cmd->cmnd[0],
+			 cmd->device ? cmd->device->id : 0xffff,
+			 cmd->device ? cmd->device->lun : 0xffff,
+			 lpfc_cmd->status, lpfc_cmd->result,
+			 pIocbOut->iocb.ulpContext,
+			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
+
+		switch (lpfc_cmd->status) {
+		case IOSTAT_FCP_RSP_ERROR:
+			/* Call FCP RSP handler to determine result */
+			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
+			break;
+		case IOSTAT_NPORT_BSY:
+		case IOSTAT_FABRIC_BSY:
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+			fast_path_evt = lpfc_alloc_fast_evt(phba);
+			if (!fast_path_evt)
+				break;
+			fast_path_evt->un.fabric_evt.event_type =
+				FC_REG_FABRIC_EVENT;
+			fast_path_evt->un.fabric_evt.subcategory =
+				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+					&pnode->nlp_portname,
+					sizeof(struct lpfc_name));
+				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+					&pnode->nlp_nodename,
+					sizeof(struct lpfc_name));
+			}
+			fast_path_evt->vport = vport;
+			fast_path_evt->work_evt.evt =
+				LPFC_EVT_FASTPATH_MGMT_EVT;
+			spin_lock_irqsave(&phba->hbalock, flags);
+			list_add_tail(&fast_path_evt->work_evt.evt_listp,
+				&phba->work_list);
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			lpfc_worker_wake_up(phba);
+			break;
+		case IOSTAT_LOCAL_REJECT:
+		case IOSTAT_REMOTE_STOP:
+			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+			    lpfc_cmd->result ==
+					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+				break;
+			}
+			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
+			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
+				cmd->result = ScsiResult(DID_REQUEUE, 0);
+				break;
+			}
+			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+					/*
+					 * This is a response for a BG enabled
+					 * cmd. Parse BG error
+					 */
+					lpfc_parse_bg_err(phba, lpfc_cmd,
+							pIocbOut);
+					break;
+				} else {
+					lpfc_printf_vlog(vport, KERN_WARNING,
+							LOG_BG,
+							"9031 non-zero BGSTAT "
+							"on unprotected cmd\n");
+				}
+			}
+			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+				&& (phba->sli_rev == LPFC_SLI_REV4)
+				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
+				/* This IO was aborted by the target, we don't
+				 * know the rxid and because we did not send the
+				 * ABTS we cannot generate an RRQ.
+				 */
+				lpfc_set_rrq_active(phba, pnode,
+						lpfc_cmd->cur_iocbq.sli4_xritag,
+						0, 0);
+			}
+		/* else: fall through */
+		default:
+			cmd->result = ScsiResult(DID_ERROR, 0);
+			break;
+		}
+
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
+		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+						 SAM_STAT_BUSY);
+	} else
+		cmd->result = ScsiResult(DID_OK, 0);
+
+	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
+		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0710 Iodone <%d/%d> cmd %p, error "
+				 "x%x SNS x%x x%x Data: x%x x%x\n",
+				 cmd->device->id, cmd->device->lun, cmd,
+				 cmd->result, *lp, *(lp + 3), cmd->retries,
+				 scsi_get_resid(cmd));
+	}
+
+	lpfc_update_stats(phba, lpfc_cmd);
+	result = cmd->result;
+	if (vport->cfg_max_scsicmpl_time &&
+	   time_after(jiffies, lpfc_cmd->start_time +
+		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+		spin_lock_irqsave(shost->host_lock, flags);
+		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+			if (pnode->cmd_qdepth >
+				atomic_read(&pnode->cmd_pending) &&
+				(atomic_read(&pnode->cmd_pending) >
+				LPFC_MIN_TGT_QDEPTH) &&
+				((cmd->cmnd[0] == READ_10) ||
+				(cmd->cmnd[0] == WRITE_10)))
+				pnode->cmd_qdepth =
+					atomic_read(&pnode->cmd_pending);
+
+			pnode->last_change_time = jiffies;
+		}
+		spin_unlock_irqrestore(shost->host_lock, flags);
+	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
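+		/*
+		 * Otherwise, at most once every LPFC_TGTQ_INTERVAL ms, ramp
+		 * the target queue depth back up by LPFC_TGTQ_RAMPUP_PCENT
+		 * percent, capped at cfg_tgt_queue_depth.
+		 */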
+		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
+		   time_after(jiffies, pnode->last_change_time +
+			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+			spin_lock_irqsave(shost->host_lock, flags);
+			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
+				/ 100;
+			depth = depth ? depth : 1;
+			pnode->cmd_qdepth += depth;
+			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
+				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
+			pnode->last_change_time = jiffies;
+			spin_unlock_irqrestore(shost->host_lock, flags);
+		}
+	}
+
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+
+	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
+	queue_depth = cmd->device->queue_depth;
+	scsi_id = cmd->device->id;
+	cmd->scsi_done(cmd);
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		/*
+		 * If there is a thread waiting for command completion
+		 * wake up the thread.
+		 */
+		spin_lock_irqsave(shost->host_lock, flags);
+		lpfc_cmd->pCmd = NULL;
+		if (lpfc_cmd->waitq)
+			wake_up(lpfc_cmd->waitq);
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return;
+	}
+
+	if (!result)
+		lpfc_rampup_queue_depth(vport, queue_depth);
+
+	/*
+	 * Check for queue full.  If the lun is reporting queue full, then
+	 * back off the lun queue depth to prevent target overloads.
+	 */
+	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+	    NLP_CHK_NODE_ACT(pnode)) {
+		shost_for_each_device(tmp_sdev, shost) {
+			if (tmp_sdev->id != scsi_id)
+				continue;
+			depth = scsi_track_queue_full(tmp_sdev,
+						      tmp_sdev->queue_depth-1);
+			if (depth <= 0)
+				continue;
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+					 "0711 detected queue full - lun queue "
+					 "depth adjusted to %d.\n", depth);
+			lpfc_send_sdev_queuedepth_change_event(phba, vport,
+							       pnode,
+							       tmp_sdev->lun,
+							       depth+1, depth);
+		}
+	}
+
+	/*
+	 * If there is a thread waiting for command completion
+	 * wake up the thread.
+	 */
+	spin_lock_irqsave(shost->host_lock, flags);
+	lpfc_cmd->pCmd = NULL;
+	if (lpfc_cmd->waitq)
+		wake_up(lpfc_cmd->waitq);
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+}
+
+/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
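+ * (The word-by-word copy assumes sizeof(struct fcp_cmnd) is a multiple of
+ * four bytes, which holds for the lpfc FCP_CMND layout.)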
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+	int i, j;
+	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+	     i += sizeof(uint32_t), j++) {
+		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+	}
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI3 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+		    struct lpfc_nodelist *pnode)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
+	int datadir = scsi_cmnd->sc_data_direction;
+	char tag[2];
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
+	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
+	/* clear task management bits */
+	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
+
+	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
+			&lpfc_cmd->fcp_cmnd->fcp_lun);
+
+	memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
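+	/*
+	 * Map the midlayer's SCSI-2 queue tag message (if any) onto the FCP
+	 * task attribute carried in fcpCntl1.
+	 */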
+	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
+		switch (tag[0]) {
+		case HEAD_OF_QUEUE_TAG:
+			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
+			break;
+		case ORDERED_QUEUE_TAG:
+			fcp_cmnd->fcpCntl1 = ORDERED_Q;
+			break;
+		default:
+			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
+			break;
+		}
+	} else
+		fcp_cmnd->fcpCntl1 = 0;
+
+	/*
+	 * There are three possibilities here - use scatter-gather segment, use
+	 * the single mapping, or neither.  Start the lpfc command prep by
+	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+	 * data bde entry.
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		if (datadir == DMA_TO_DEVICE) {
+			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
+			if (phba->sli_rev < LPFC_SLI_REV4) {
+				iocb_cmd->un.fcpi.fcpi_parm = 0;
+				iocb_cmd->ulpPU = 0;
+			} else
+				iocb_cmd->ulpPU = PARM_READ_CHECK;
+			fcp_cmnd->fcpCntl3 = WRITE_DATA;
+			phba->fc4OutputRequests++;
+		} else {
+			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
+			iocb_cmd->ulpPU = PARM_READ_CHECK;
+			fcp_cmnd->fcpCntl3 = READ_DATA;
+			phba->fc4InputRequests++;
+		}
+	} else {
+		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
+		iocb_cmd->un.fcpi.fcpi_parm = 0;
+		iocb_cmd->ulpPU = 0;
+		fcp_cmnd->fcpCntl3 = 0;
+		phba->fc4ControlRequests++;
+	}
+	if (phba->sli_rev == 3 &&
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+	/*
+	 * Finish initializing those IOCB fields that are independent
+	 * of the scsi_cmnd request_buffer
+	 */
+	piocbq->iocb.ulpContext = pnode->nlp_rpi;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		piocbq->iocb.ulpContext =
+		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
+	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
+		piocbq->iocb.ulpFCP2Rcvy = 1;
+	else
+		piocbq->iocb.ulpFCP2Rcvy = 0;
+
+	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
+	piocbq->context1  = lpfc_cmd;
+	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+	piocbq->vport = vport;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-3 interface spec.
+ *
+ * Return codes:
+ *   0 - Error
+ *   1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+			     struct lpfc_scsi_buf *lpfc_cmd,
+			     unsigned int lun,
+			     uint8_t task_mgmt_cmd)
+{
+	struct lpfc_iocbq *piocbq;
+	IOCB_t *piocb;
+	struct fcp_cmnd *fcp_cmnd;
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+		return 0;
+
+	piocbq = &(lpfc_cmd->cur_iocbq);
+	piocbq->vport = vport;
+
+	piocb = &piocbq->iocb;
+
+	fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	/* Clear out any old data in the FCP command area */
+	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+	if (vport->phba->sli_rev == 3 &&
+	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
+	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
+	piocb->ulpContext = ndlp->nlp_rpi;
+	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+		piocb->ulpContext =
+		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	}
+	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+		piocb->ulpFCP2Rcvy = 1;
+	}
+	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
+
+	/* ulpTimeout is only one byte */
+	if (lpfc_cmd->timeout > 0xff) {
+		/*
+		 * Do not timeout the command at the firmware level.
+		 * The driver will provide the timeout mechanism.
+		 */
+		piocb->ulpTimeout = 0;
+	} else
+		piocb->ulpTimeout = lpfc_cmd->timeout;
+
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+
+	return 1;
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
+	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
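+		/* LightPulse family: SLI-3 interface, use the _s3 routines */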
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
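+		/* OneConnect family: SLI-4 interface, use the _s4 routines */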
+		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
+		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1418 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+	return 0;
+}
+
+/**
+ * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for the device reset and target
+ * reset routines. It releases the scsi buffer associated with lpfc_cmd.
+ **/
+static void
+lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct lpfc_scsi_buf *lpfc_cmd =
+		(struct lpfc_scsi_buf *) cmdiocbq->context1;
+	if (lpfc_cmd)
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+	return;
+}
+
+/**
+ * lpfc_info - Info entry point of scsi_host_template data structure
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about hba.
+ *
+ * Return code:
+ *   Pointer to char - Success.
+ **/
+const char *
+lpfc_info(struct Scsi_Host *host)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	int len;
+	static char  lpfcinfobuf[384];
+
+	memset(lpfcinfobuf, 0, 384);
+	if (phba && phba->pcidev) {
+		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
+		len = strlen(lpfcinfobuf);
+		snprintf(lpfcinfobuf + len,
+			384-len,
+			" on PCI bus %02x device %02x irq %d",
+			phba->pcidev->bus->number,
+			phba->pcidev->devfn,
+			phba->pcidev->irq);
+		len = strlen(lpfcinfobuf);
+		if (phba->Port[0]) {
+			snprintf(lpfcinfobuf + len,
+				 384-len,
+				 " port %s",
+				 phba->Port);
+		}
+		len = strlen(lpfcinfobuf);
+		if (phba->sli4_hba.link_state.logical_speed) {
+			snprintf(lpfcinfobuf + len,
+				 384-len,
+				 " Logical Link Speed: %d Mbps",
+				 phba->sli4_hba.link_state.logical_speed * 10);
+		}
+	}
+	return lpfcinfobuf;
+}
+
+/**
+ * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
+static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
+{
+	unsigned long  poll_tmo_expires =
+		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
+
+	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+		mod_timer(&phba->fcp_poll_timer,
+			  poll_tmo_expires);
+}
+
+/**
+ * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
+void lpfc_poll_start_timer(struct lpfc_hba * phba)
+{
+	lpfc_poll_rearm_timer(phba);
+}
+
+/**
+ * lpfc_poll_timeout - Restart polling timer
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is enabled
+ * and the FCP ring interrupt is disabled.
+ **/
+
+void lpfc_poll_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+}
+
+/**
+ * lpfc_queuecommand - scsi_host_template queuecommand entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * The driver registers this routine with the scsi midlayer to submit a @cmnd
+ * for processing.  This routine prepares an IOCB from the scsi command and
+ * provides it to the firmware.  The @done callback is invoked after the
+ * driver has finished processing the command.
+ *
+ * Return value :
+ *   0 - Success
+ *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
+static int
+lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+	int err;
+
+	err = fc_remote_port_chkready(rport);
+	if (err) {
+		cmnd->result = err;
+		goto out_fail_command;
+	}
+	ndlp = rdata->pnode;
+
+	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
+		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
+				" op:%02x str=%s without registering for"
+				" BlockGuard - Rejecting command\n",
+				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+				dif_op_str[scsi_get_prot_op(cmnd)]);
+		goto out_fail_command;
+	}
+
+	/*
+	 * Catch race where our node has transitioned, but the
+	 * transport is still transitioning.
+	 */
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
+		goto out_fail_command;
+	}
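+	/* Throttle: do not exceed this node's outstanding command depth */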
+	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
+		goto out_tgt_busy;
+
+	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
+	if (lpfc_cmd == NULL) {
+		lpfc_rampdown_queue_depth(phba);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0707 driver's buffer pool is empty, "
+				 "IO busied\n");
+		goto out_host_busy;
+	}
+
+	/*
+	 * Store the midlayer's command structure for the completion phase
+	 * and complete the command initialization.
+	 */
+	lpfc_cmd->pCmd  = cmnd;
+	lpfc_cmd->rdata = rdata;
+	lpfc_cmd->timeout = 0;
+	lpfc_cmd->start_time = jiffies;
+	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
+	cmnd->scsi_done = done;
+
+	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+		if (vport->phba->cfg_enable_bg) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				"9033 BLKGRD: rcvd protected cmd:%02x op=%s "
+				"guard=%s\n", cmnd->cmnd[0],
+				dif_op_str[scsi_get_prot_op(cmnd)],
+				dif_grd_str[scsi_host_get_guard(shost)]);
+			if (cmnd->cmnd[0] == READ_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9035 BLKGRD: READ @ sector %llu, "
+					"cnt %u, rpt %d\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
+			else if (cmnd->cmnd[0] == WRITE_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9036 BLKGRD: WRITE @ sector %llu, "
+					"cnt %u, wpt %d\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
+		}
+
+		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+	} else {
+		if (vport->phba->cfg_enable_bg) {
+			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+				"9038 BLKGRD: rcvd unprotected cmd:"
+				"%02x op=%s guard=%s\n", cmnd->cmnd[0],
+				dif_op_str[scsi_get_prot_op(cmnd)],
+				dif_grd_str[scsi_host_get_guard(shost)]);
+			if (cmnd->cmnd[0] == READ_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9040 dbg: READ @ sector %llu, "
+					"cnt %u, rpt %d\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					 blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
+			else if (cmnd->cmnd[0] == WRITE_10)
+				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+					"9041 dbg: WRITE @ sector %llu, "
+					"cnt %u, wpt %d\n",
+					(unsigned long long)scsi_get_lba(cmnd),
+					blk_rq_sectors(cmnd->request),
+					(cmnd->cmnd[1]>>5));
+		}
+		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+	}
+
+	if (err)
+		goto out_host_busy_free_buf;
+
+	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+
+	atomic_inc(&ndlp->cmd_pending);
+	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+	if (err) {
+		atomic_dec(&ndlp->cmd_pending);
+		goto out_host_busy_free_buf;
+	}
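+	/* In polled mode, service FCP ring completions inline */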
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		spin_unlock(shost->host_lock);
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+		spin_lock(shost->host_lock);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	return 0;
+
+ out_host_busy_free_buf:
+	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+	lpfc_release_scsi_buf(phba, lpfc_cmd);
+ out_host_busy:
+	return SCSI_MLQUEUE_HOST_BUSY;
+
+ out_tgt_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
+ out_fail_command:
+	done(cmnd);
+	return 0;
+}
+
+static DEF_SCSI_QCMD(lpfc_queuecommand)
+
+/**
+ * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts @cmnd pending in base driver.
+ *
+ * Return code :
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_iocbq *iocb;
+	struct lpfc_iocbq *abtsiocb;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	IOCB_t *cmd, *icmd;
+	int ret = SUCCESS;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+	ret = fc_block_scsi_eh(cmnd);
+	if (ret)
+		return ret;
+	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
+	if (!lpfc_cmd) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
+			 "x%x ID %d LUN %d\n",
+			 ret, cmnd->device->id, cmnd->device->lun);
+		return SUCCESS;
+	}
+
+	/*
+	 * If pCmd field of the corresponding lpfc_scsi_buf structure
+	 * points to a different SCSI command, then the driver has
+	 * already completed this command, but the midlayer did not
+	 * see the completion before the eh fired.  Just return
+	 * SUCCESS.
+	 */
+	iocb = &lpfc_cmd->cur_iocbq;
+	if (lpfc_cmd->pCmd != cmnd)
+		goto out;
+
+	BUG_ON(iocb->context1 != lpfc_cmd);
+
+	abtsiocb = lpfc_sli_get_iocbq(phba);
+	if (abtsiocb == NULL) {
+		ret = FAILED;
+		goto out;
+	}
+
+	/*
+	 * The scsi command cannot be in the txq; it is in flight because
+	 * pCmd is still pointing at the SCSI command we have to abort. There
+	 * is no need to search the txcmplq. Just send an abort to the FW.
+	 */
+
+	cmd = &iocb->iocb;
+	icmd = &abtsiocb->iocb;
+	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+	icmd->un.acxri.abortContextTag = cmd->ulpContext;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+	else
+		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+
+	icmd->ulpLe = 1;
+	icmd->ulpClass = cmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+
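+	/* If the link is up send an ABTS; otherwise just close the exchange */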
+	if (lpfc_is_link_up(phba))
+		icmd->ulpCommand = CMD_ABORT_XRI_CN;
+	else
+		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+
+	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+	abtsiocb->vport = vport;
+	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
+	    IOCB_ERROR) {
+		lpfc_sli_release_iocbq(phba, abtsiocb);
+		ret = FAILED;
+		goto out;
+	}
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+
+	lpfc_cmd->waitq = &waitq;
+	/* Wait for abort to complete */
+	wait_event_timeout(waitq,
+			  (lpfc_cmd->pCmd != cmnd),
+			   (2*vport->cfg_devloss_tmo*HZ));
+
+	spin_lock_irq(shost->host_lock);
+	lpfc_cmd->waitq = NULL;
+	spin_unlock_irq(shost->host_lock);
+
+	if (lpfc_cmd->pCmd == cmnd) {
+		ret = FAILED;
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "0748 abort handler timed out waiting "
+				 "for abort to complete: ret %#x, ID %d, "
+				 "LUN %d\n",
+				 ret, cmnd->device->id, cmnd->device->lun);
+	}
+
+ out:
+	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
+			 "LUN %d\n", ret, cmnd->device->id,
+			 cmnd->device->lun);
+	return ret;
+}
+
+static char *
+lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
+{
+	switch (task_mgmt_cmd) {
+	case FCP_ABORT_TASK_SET:
+		return "ABORT_TASK_SET";
+	case FCP_CLEAR_TASK_SET:
+		return "FCP_CLEAR_TASK_SET";
+	case FCP_BUS_RESET:
+		return "FCP_BUS_RESET";
+	case FCP_LUN_RESET:
+		return "FCP_LUN_RESET";
+	case FCP_TARGET_RESET:
+		return "FCP_TARGET_RESET";
+	case FCP_CLEAR_ACA:
+		return "FCP_CLEAR_ACA";
+	case FCP_TERMINATE_TASK:
+		return "FCP_TERMINATE_TASK";
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
+ * @vport: The virtual port for which this call is being executed.
+ * @rdata: Pointer to remote port local data
+ * @tgt_id: Target ID of remote device.
+ * @lun_id: Lun number for the TMF
+ * @task_mgmt_cmd: type of TMF to send
+ *
+ * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
+ * a remote port.
+ *
+ * Return Code:
+ *   0x2003 - Error
+ *   0x2002 - Success.
+ **/
+static int
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
+		    unsigned  tgt_id, unsigned int lun_id,
+		    uint8_t task_mgmt_cmd)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_iocbq *iocbqrsp;
+	struct lpfc_nodelist *pnode = rdata->pnode;
+	int ret;
+	int status;
+
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return FAILED;
+
+	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+	if (lpfc_cmd == NULL)
+		return FAILED;
+	lpfc_cmd->timeout = 60;
+	lpfc_cmd->rdata = rdata;
+
+	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+					   task_mgmt_cmd);
+	if (!status) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
+
+	iocbq = &lpfc_cmd->cur_iocbq;
+	iocbqrsp = lpfc_sli_get_iocbq(phba);
+	if (iocbqrsp == NULL) {
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+		return FAILED;
+	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			 "0702 Issue %s to TGT %d LUN %d "
+			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
+			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
+			 iocbq->iocb_flag);
+
+	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+					  iocbq, iocbqrsp, lpfc_cmd->timeout);
+	if (status != IOCB_SUCCESS) {
+		if (status == IOCB_TIMEDOUT) {
+			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+			ret = TIMEOUT_ERROR;
+		} else
+			ret = FAILED;
+		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+			 "iocb_flag x%x\n",
+			 lpfc_taskmgmt_name(task_mgmt_cmd),
+			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
+			 iocbqrsp->iocb.un.ulpWord[4],
+			 iocbq->iocb_flag);
+	} else if (status == IOCB_BUSY)
+		ret = FAILED;
+	else
+		ret = SUCCESS;
+
+	lpfc_sli_release_iocbq(phba, iocbqrsp);
+
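+	/* On timeout, lpfc_tskmgmt_def_cmpl releases the scsi buffer later */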
+	if (ret != TIMEOUT_ERROR)
+		lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+	return ret;
+}
+
+/**
+ * lpfc_chk_tgt_mapped - Check whether the scsi target (rport) is mapped
+ * @vport: The virtual port to check on
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine delays until the scsi target (aka rport) for the
+ * command exists (is present and logged in) or we declare it non-existent.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+{
+	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_nodelist *pnode;
+	unsigned long later;
+
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	/*
+	 * If target is not in a MAPPED state, delay until
+	 * target is rediscovered or devloss timeout expires.
+	 */
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies)) {
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+			return FAILED;
+		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
+			return SUCCESS;
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+		rdata = cmnd->device->hostdata;
+		if (!rdata)
+			return FAILED;
+		pnode = rdata->pnode;
+	}
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
+	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+		return FAILED;
+	return SUCCESS;
+}
+
+/**
+ * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset TMF
+ * @vport: The virtual port (scsi_host) for the flush context
+ * @tgt_id: If aborting by Target context - specifies the target id
+ * @lun_id: If aborting by Lun context - specifies the lun id
+ * @context: specifies the context level to flush at.
+ *
+ * After a reset condition via TMF, we need to flush orphaned i/o
+ * contexts from the adapter. This routine aborts any contexts
+ * outstanding, then waits for their completions. The wait is
+ * bounded by devloss_tmo though.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+			uint64_t lun_id, lpfc_ctx_cmd context)
+{
+	struct lpfc_hba   *phba = vport->phba;
+	unsigned long later;
+	int cnt;
+
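+	/*
+	 * Abort whatever is outstanding for this context, then wait for
+	 * the aborts to complete (bounded by 2 * devloss_tmo).
+	 */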
+	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+	if (cnt)
+		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+				    tgt_id, lun_id, context);
+	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	while (time_after(later, jiffies) && cnt) {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
+	}
+	if (cnt) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0724 I/O flush failure for context %s : cnt x%x\n",
+			((context == LPFC_CTX_LUN) ? "LUN" :
+			 ((context == LPFC_CTX_TGT) ? "TGT" :
+			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
+			cnt);
+		return FAILED;
+	}
+	return SUCCESS;
+}
+
+/**
+ * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a LUN_RESET task management
+ * command.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_nodelist *pnode;
+	unsigned tgt_id = cmnd->device->id;
+	unsigned int lun_id = cmnd->device->lun;
+	struct lpfc_scsi_event_header scsi_event;
+	int status;
+
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0798 Device Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	status = fc_block_scsi_eh(cmnd);
+	if (status)
+		return status;
+
+	status = lpfc_chk_tgt_mapped(vport, cmnd);
+	if (status == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0721 Device Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
+	scsi_event.lun = lun_id;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+						FCP_LUN_RESET);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0713 SCSI layer issued Device Reset (%d, %d) "
+			 "return x%x\n", tgt_id, lun_id, status);
+
+	/*
+	 * We have to clean up i/o as it may be orphaned by the TMF,
+	 * or if the TMF failed, it may be in an indeterminate state.
+	 * So, continue on.
+	 * We will report success only if all the i/o aborts successfully.
+	 */
+	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+						LPFC_CTX_LUN);
+	return status;
+}
+
+/**
+ * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+	struct lpfc_nodelist *pnode;
+	unsigned tgt_id = cmnd->device->id;
+	unsigned int lun_id = cmnd->device->lun;
+	struct lpfc_scsi_event_header scsi_event;
+	int status;
+
+	if (!rdata) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0799 Target Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+	pnode = rdata->pnode;
+	status = fc_block_scsi_eh(cmnd);
+	if (status)
+		return status;
+
+	status = lpfc_chk_tgt_mapped(vport, cmnd);
+	if (status == FAILED) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			"0722 Target Reset rport failure: rdata x%p\n", rdata);
+		return FAILED;
+	}
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+					FCP_TARGET_RESET);
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0723 SCSI layer issued Target Reset (%d, %d) "
+			 "return x%x\n", tgt_id, lun_id, status);
+
+	/*
+	 * We have to clean up i/o as it may be orphaned by the TMF,
+	 * or if the TMF failed, it may be in an indeterminate state.
+	 * So, continue on.
+	 * We will report success only if all the i/o aborts successfully.
+	 */
+	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+					LPFC_CTX_TGT);
+	return status;
+}
+
+/**
+ * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset for all targets on @cmnd->device->host.
+ * This emulates Parallel SCSI Bus Reset Semantics.
+ *
+ * Return code :
+ *  0x2003 - Error
+ *  0x2002 - Success
+ **/
+static int
+lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
+{
+	struct Scsi_Host  *shost = cmnd->device->host;
+	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct lpfc_scsi_event_header scsi_event;
+	int match;
+	int ret = SUCCESS, status, i;
+
+	scsi_event.event_type = FC_REG_SCSI_EVENT;
+	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+	scsi_event.lun = 0;
+	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+	fc_host_post_vendor_event(shost, fc_get_event_number(),
+		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+	status = fc_block_scsi_eh(cmnd);
+	if (status)
+		return status;
+
+	/*
+	 * Since the driver manages a single bus device, reset all
+	 * targets known to the driver.  Should any target reset
+	 * fail, this routine returns failure to the midlayer.
+	 */
+	for (i = 0; i < LPFC_MAX_TARGET; i++) {
+		/* Search for mapped node by target ID */
+		match = 0;
+		spin_lock_irq(shost->host_lock);
+		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+			if (!NLP_CHK_NODE_ACT(ndlp))
+				continue;
+			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
+			    ndlp->nlp_sid == i &&
+			    ndlp->rport) {
+				match = 1;
+				break;
+			}
+		}
+		spin_unlock_irq(shost->host_lock);
+		if (!match)
+			continue;
+
+		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+					i, 0, FCP_TARGET_RESET);
+
+		if (status != SUCCESS) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+					 "0700 Bus Reset on target %d failed\n",
+					 i);
+			ret = FAILED;
+		}
+	}
+	/*
+	 * We have to clean up outstanding i/o as it may be orphaned by the
+	 * TMFs above, or if any of the TMFs failed, it may be in an
+	 * indeterminate state.
+	 * We will report success only if all the i/o aborts successfully.
+	 */
+
+	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
+	if (status != SUCCESS)
+		ret = FAILED;
+
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
+	return ret;
+}
+
+/**
+ * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
+ * globally available list of scsi buffers. It also makes sure that no more
+ * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
+ * This list of scsi buffers exists for the lifetime of the driver.
+ *
+ * Return codes:
+ *   non-0 - Error
+ *   0 - Success
+ **/
+static int
+lpfc_slave_alloc(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	uint32_t total = 0;
+	uint32_t num_to_alloc = 0;
+	int num_allocated = 0;
+	uint32_t sdev_cnt;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	sdev->hostdata = rport->dd_data;
+	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
+
+	/*
+	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
+	 * available list of scsi buffers.  Don't allocate more than the
+	 * HBA limit conveyed to the midlayer via the host structure.  The
+	 * formula accounts for the lun_queue_depth + error handlers + 1
+	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
+	 */
+	total = phba->total_scsi_bufs;
+	num_to_alloc = vport->cfg_lun_queue_depth + 2;
+
+	/* If allocated buffers are enough do nothing */
+	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
+		return 0;
+
+	/* Allow some exchanges to be available always to complete discovery */
+	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0704 At limitation of %d preallocated "
+				 "command buffers\n", total);
+		return 0;
+	/* Allow some exchanges to be available always to complete discovery */
+	} else if (total + num_to_alloc >
+		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0705 Allocation request of %d "
+				 "command buffers will exceed max of %d.  "
+				 "Reducing allocation request to %d.\n",
+				 num_to_alloc, phba->cfg_hba_queue_depth,
+				 (phba->cfg_hba_queue_depth - total));
+		num_to_alloc = phba->cfg_hba_queue_depth - total;
+	}
+	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+	if (num_to_alloc != num_allocated) {
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+				 "0708 Allocation request of %d "
+				 "command buffers did not succeed.  "
+				 "Allocated %d buffers.\n",
+				 num_to_alloc, num_allocated);
+	}
+	if (num_allocated > 0)
+		phba->total_scsi_bufs += num_allocated;
+	return 0;
+}
+
+/**
+ * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures the following items
+ *   - Tag command queuing support for @sdev if supported.
+ *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ *   0 - Success
+ **/
+static int
+lpfc_slave_configure(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+
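+	/* Apply the configured LUN queue depth with or without tagged queuing */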
+	if (sdev->tagged_supported)
+		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
+	else
+		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
+
+	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		lpfc_sli_handle_fast_ring_event(phba,
+			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+			lpfc_poll_rearm_timer(phba);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets the @sdev hostdata field to NULL.
+ **/
+static void
+lpfc_slave_destroy(struct scsi_device *sdev)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+	struct lpfc_hba   *phba = vport->phba;
+	atomic_dec(&phba->sdev_cnt);
+	sdev->hostdata = NULL;
+	return;
+}
+
+
+struct scsi_host_template lpfc_template = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_queuecommand,
+	.eh_abort_handler	= lpfc_abort_handler,
+	.eh_device_reset_handler = lpfc_device_reset_handler,
+	.eh_target_reset_handler = lpfc_target_reset_handler,
+	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
+	.slave_alloc		= lpfc_slave_alloc,
+	.slave_configure	= lpfc_slave_configure,
+	.slave_destroy		= lpfc_slave_destroy,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
+	.cmd_per_lun		= LPFC_CMD_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_hba_attrs,
+	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.change_queue_depth	= lpfc_change_queue_depth,
+};
+
+struct scsi_host_template lpfc_vport_template = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_queuecommand,
+	.eh_abort_handler	= lpfc_abort_handler,
+	.eh_device_reset_handler = lpfc_device_reset_handler,
+	.eh_target_reset_handler = lpfc_target_reset_handler,
+	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
+	.slave_alloc		= lpfc_slave_alloc,
+	.slave_configure	= lpfc_slave_configure,
+	.slave_destroy		= lpfc_slave_destroy,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
+	.cmd_per_lun		= LPFC_CMD_PER_LUN,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_vport_attrs,
+	.max_sectors		= 0xFFFF,
+	.change_queue_depth	= lpfc_change_queue_depth,
+};
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.h
new file mode 100644
index 0000000..21a2ffe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_scsi.h
@@ -0,0 +1,167 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <asm/byteorder.h>
+
+struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
+
+#define list_remove_head(list, entry, type, member)		\
+	do {							\
+	entry = NULL;						\
+	if (!list_empty(list)) {				\
+		entry = list_entry((list)->next, type, member);	\
+		list_del_init(&entry->member);			\
+	}							\
+	} while(0)
+
+#define list_get_first(list, type, member)			\
+	(list_empty(list)) ? NULL :				\
+	list_entry((list)->next, type, member)
+
+/* per-port data that is allocated in the FC transport for us */
+struct lpfc_rport_data {
+	struct lpfc_nodelist *pnode;	/* Pointer to the node structure. */
+};
+
+struct fcp_rsp {
+	uint32_t rspRsvd1;	/* FC Word 0, byte 0:3 */
+	uint32_t rspRsvd2;	/* FC Word 1, byte 0:3 */
+
+	uint8_t rspStatus0;	/* FCP_STATUS byte 0 (reserved) */
+	uint8_t rspStatus1;	/* FCP_STATUS byte 1 (reserved) */
+	uint8_t rspStatus2;	/* FCP_STATUS byte 2 field validity */
+#define RSP_LEN_VALID  0x01	/* bit 0 */
+#define SNS_LEN_VALID  0x02	/* bit 1 */
+#define RESID_OVER     0x04	/* bit 2 */
+#define RESID_UNDER    0x08	/* bit 3 */
+	uint8_t rspStatus3;	/* FCP_STATUS byte 3 SCSI status byte */
+
+	uint32_t rspResId;	/* Residual xfer if residual count field set in
+				   fcpStatus2 */
+	/* Received in Big Endian format */
+	uint32_t rspSnsLen;	/* Length of sense data in fcpSnsInfo */
+	/* Received in Big Endian format */
+	uint32_t rspRspLen;	/* Length of FCP response data in fcpRspInfo */
+	/* Received in Big Endian format */
+
+	uint8_t rspInfo0;	/* FCP_RSP_INFO byte 0 (reserved) */
+	uint8_t rspInfo1;	/* FCP_RSP_INFO byte 1 (reserved) */
+	uint8_t rspInfo2;	/* FCP_RSP_INFO byte 2 (reserved) */
+	uint8_t rspInfo3;	/* FCP_RSP_INFO RSP_CODE byte 3 */
+
+#define RSP_NO_FAILURE       0x00
+#define RSP_DATA_BURST_ERR   0x01
+#define RSP_CMD_FIELD_ERR    0x02
+#define RSP_RO_MISMATCH_ERR  0x03
+#define RSP_TM_NOT_SUPPORTED 0x04	/* Task mgmt function not supported */
+#define RSP_TM_NOT_COMPLETED 0x05	/* Task mgmt function not performed */
+
+	uint32_t rspInfoRsvd;	/* FCP_RSP_INFO bytes 4-7 (reserved) */
+
+	uint8_t rspSnsInfo[128];
+#define SNS_ILLEGAL_REQ 0x05	/* sense key is byte 3 ([2]) */
+#define SNSCOD_BADCMD 0x20	/* sense code is byte 13 ([12]) */
+};
+
+struct fcp_cmnd {
+	struct scsi_lun  fcp_lun;
+
+	uint8_t fcpCntl0;	/* FCP_CNTL byte 0 (reserved) */
+	uint8_t fcpCntl1;	/* FCP_CNTL byte 1 task codes */
+#define  SIMPLE_Q        0x00
+#define  HEAD_OF_Q       0x01
+#define  ORDERED_Q       0x02
+#define  ACA_Q           0x04
+#define  UNTAGGED        0x05
+	uint8_t fcpCntl2;	/* FCP_CTL byte 2 task management codes */
+#define  FCP_ABORT_TASK_SET  0x02	/* Bit 1 */
+#define  FCP_CLEAR_TASK_SET  0x04	/* bit 2 */
+#define  FCP_BUS_RESET       0x08	/* bit 3 */
+#define  FCP_LUN_RESET       0x10	/* bit 4 */
+#define  FCP_TARGET_RESET    0x20	/* bit 5 */
+#define  FCP_CLEAR_ACA       0x40	/* bit 6 */
+#define  FCP_TERMINATE_TASK  0x80	/* bit 7 */
+	uint8_t fcpCntl3;
+#define  WRITE_DATA      0x01	/* Bit 0 */
+#define  READ_DATA       0x02	/* Bit 1 */
+
+	uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
+	uint32_t fcpDl;		/* Total transfer length */
+
+};
+
+struct lpfc_scsicmd_bkt {
+	uint32_t cmd_count;
+};
+
+struct lpfc_scsi_buf {
+	struct list_head list;
+	struct scsi_cmnd *pCmd;
+	struct lpfc_rport_data *rdata;
+
+	uint32_t timeout;
+
+	uint16_t exch_busy;     /* SLI4 hba reported XB on complete WCQE */
+	uint16_t status;	/* From IOCB Word 7- ulpStatus */
+	uint32_t result;	/* From IOCB Word 4. */
+
+	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
+				 * dma_map_sg.  The driver needs this for calls
+				 * to dma_unmap_sg. */
+	uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for protection data */
+
+	dma_addr_t nonsg_phys;	/* Non scatter-gather physical address. */
+
+	/*
+	 * data and dma_handle are the kernel virtual and bus address of the
+	 * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+	 * gather bde list that supports the sg_tablesize value.
+	 */
+	void *data;
+	dma_addr_t dma_handle;
+
+	struct fcp_cmnd *fcp_cmnd;
+	struct fcp_rsp *fcp_rsp;
+	struct ulp_bde64 *fcp_bpl;
+
+	dma_addr_t dma_phys_bpl;
+
+	/* cur_iocbq has phys of the dma-able buffer.
+	 * Iotag is in here
+	 */
+	struct lpfc_iocbq cur_iocbq;
+	wait_queue_head_t *waitq;
+	unsigned long start_time;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	/* Used to restore any changes to protection data for error injection */
+	void *prot_data_segment;
+	uint32_t prot_data;
+	uint32_t prot_data_type;
+#define	LPFC_INJERR_REFTAG	1
+#define	LPFC_INJERR_APPTAG	2
+#define	LPFC_INJERR_GUARD	3
+#endif
+};
+
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+#define LPFC_BPL_SIZE          1024
+#define MDAC_DIRECT_CMD                  0x22
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.c
new file mode 100644
index 0000000..dbaf5b9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.c
@@ -0,0 +1,16044 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+#include <linux/aer.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
+#include "lpfc_vport.h"
+
+/* There are only four IOCB completion types. */
+typedef enum _lpfc_iocb_type {
+	LPFC_UNKNOWN_IOCB,
+	LPFC_UNSOL_IOCB,
+	LPFC_SOL_IOCB,
+	LPFC_ABORT_IOCB
+} lpfc_iocb_type;
+
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+				  uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+			      uint8_t *, uint32_t *);
+static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
+							 struct lpfc_iocbq *);
+static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
+				      struct hbq_dmabuf *);
+static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+				    struct lpfc_cqe *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+	return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The work Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+	union lpfc_wqe *temp_wqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_wqe = q->qe[q->host_index].wqe;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	/* set consumption flag every once in a while */
+	if (!((q->host_index + 1) % q->entry_repost))
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
+	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+	uint32_t released = 0;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	if (q->hba_index == index)
+		return 0;
+	do {
+		q->hba_index = ((q->hba_index + 1) % q->entry_count);
+		released++;
+	} while (q->hba_index != index);
+	return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
+ * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+	struct lpfc_mqe *temp_mqe;
+	struct lpfc_register doorbell;
+	uint32_t host_index;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return -ENOMEM;
+	temp_mqe = q->qe[q->host_index].mqe;
+
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+		return -ENOMEM;
+	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+	/* Save off the mailbox pointer for completion */
+	q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+	/* Update the host index before invoking device */
+	host_index = q->host_index;
+	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	/* Ring Doorbell */
+	doorbell.word0 = 0;
+	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	/* Clear the mailbox pointer for completion */
+	q->phba->mbox = NULL;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+	struct lpfc_eqe *eqe;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return NULL;
+	eqe = q->qe[q->hba_index].eqe;
+
+	/* If the next EQE is not valid then we are done */
+	if (!bf_get_le32(lpfc_eqe_valid, eqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_eqe *temp_eqe;
+	struct lpfc_register doorbell;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_eqe = q->qe[q->host_index].eqe;
+		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm) {
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	}
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
+	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
+		readl(q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed, but not popped back to the HBA then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+	struct lpfc_cqe *cqe;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return NULL;
+
+	/* If the next CQE is not valid then we are done */
+	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+		return NULL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+		return NULL;
+
+	cqe = q->qe[q->hba_index].cqe;
+	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+	uint32_t released = 0;
+	struct lpfc_cqe *temp_qe;
+	struct lpfc_register doorbell;
+
+	/* sanity check on queue memory */
+	if (unlikely(!q))
+		return 0;
+	/* while there are valid entries */
+	while (q->hba_index != q->host_index) {
+		temp_qe = q->qe[q->host_index].cqe;
+		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
+		released++;
+		q->host_index = ((q->host_index + 1) % q->entry_count);
+	}
+	if (unlikely(released == 0 && !arm))
+		return 0;
+
+	/* ring doorbell for number popped */
+	doorbell.word0 = 0;
+	if (arm)
+		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+	return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The header Receive Queue Entry to put on the header Receive Queue.
+ * @drqe: The data Receive Queue Entry to put on the data Receive Queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next available
+ * entries on @hq and @dq. This function will then ring the Receive Queue
+ * Doorbell to signal the HBA to start processing the Receive Queue Entries.
+ * This function returns the index that the rqe was copied to if successful.
+ * If no entries are available then this function will return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+	struct lpfc_rqe *temp_hrqe;
+	struct lpfc_rqe *temp_drqe;
+	struct lpfc_register doorbell;
+	int put_index = hq->host_index;
+
+	/* sanity check on queue memory */
+	if (unlikely(!hq) || unlikely(!dq))
+		return -ENOMEM;
+	temp_hrqe = hq->qe[hq->host_index].rqe;
+	temp_drqe = dq->qe[dq->host_index].rqe;
+
+	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+		return -EINVAL;
+	if (hq->host_index != dq->host_index)
+		return -EINVAL;
+	/* If the host has not yet processed the next entry then we are done */
+	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+		return -EBUSY;
+	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+	/* Update the host index to point to the next slot */
+	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+	/* Ring The Header Receive Queue Doorbell */
+	if (!(hq->host_index % hq->entry_repost)) {
+		doorbell.word0 = 0;
+		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+		       hq->entry_repost);
+		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+	}
+	return put_index;
+}
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!hq) || unlikely(!dq))
+		return 0;
+
+	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+		return 0;
+	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+	return 1;
+}
+
+/**
+ * lpfc_cmd_iocb - Get next command iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next command iocb entry
+ * in the command ring. The caller must hold hbalock to prevent
+ * other threads from consuming the next command iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	return (IOCB_t *) (((char *) pring->cmdringaddr) +
+			   pring->cmdidx * phba->iocb_cmd_size);
+}
+
+/**
+ * lpfc_resp_iocb - Get next response iocb entry in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next response iocb entry
+ * in the response ring. The caller must hold hbalock to make sure
+ * that no other thread consumes the next response iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
+static inline IOCB_t *
+lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	return (IOCB_t *) (((char *) pring->rspringaddr) +
+			   pring->rspidx * phba->iocb_rsp_size);
+}
+
+/**
+ * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+static struct lpfc_iocbq *
+__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
+	struct lpfc_iocbq * iocbq = NULL;
+
+	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+	if (iocbq)
+		phba->iocb_cnt++;
+	if (phba->iocb_cnt > phba->iocb_max)
+		phba->iocb_max = phba->iocb_cnt;
+	return iocbq;
+}
+
+/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	struct lpfc_sglq *sglq;
+
+	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
+	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
+	return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	struct lpfc_sglq *sglq;
+
+	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
+	return sglq;
+}
+
+/**
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ **/
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+		    uint16_t xritag,
+		    struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+
+	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+
+	/* The target DID could have been swapped (cable swap);
+	 * we should use the ndlp from the findnode if it is
+	 * available.
+	 */
+	if ((!ndlp) && rrq->ndlp)
+		ndlp = rrq->ndlp;
+
+	if (!ndlp)
+		goto out;
+
+	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
+		rrq->send_rrq = 0;
+		rrq->xritag = 0;
+		rrq->rrq_stop_time = 0;
+	}
+out:
+	mempool_free(rrq, phba->rrq_pool);
+}
+
+/**
+ * lpfc_handle_rrq_active - Checks whether an RRQ has waited RATOV.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function takes and releases the hbalock internally. It checks
+ * whether stop_time (ratov from setting rrq active) has been reached;
+ * if it has and the send_rrq flag is set then it will call
+ * lpfc_send_rrq. If the send_rrq flag is not set then it will just
+ * call the routine to clear the rrq and free the rrq resource.
+ * The timer is set to the next rrq that is going to expire before
+ * leaving the routine.
+ *
+ **/
+void
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+	LIST_HEAD(send_rrq);
+
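+	/* Move expired RRQs to a private list under the lock; process unlocked */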
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov + 1);
+	list_for_each_entry_safe(rrq, nextrrq,
+				 &phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time))
+			list_move(&rrq->list, &send_rrq);
+		else if (time_before(rrq->rrq_stop_time, next_time))
+			next_time = rrq->rrq_stop_time;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+		list_del(&rrq->list);
+		if (!rrq->send_rrq)
+			/* this call will free the rrq */
+			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		else if (lpfc_send_rrq(phba, rrq)) {
+			/* if we send the rrq then the completion handler
+			*  will clear the bit in the xribitmap.
+			*/
+			lpfc_clr_rrq_active(phba, rrq->xritag,
+					    rrq);
+		}
+	}
+}
+
+/**
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
+ * @vport: Pointer to vport context object.
+ * @xri: The xri used in the exchange.
+ * @did: The target's DID for this exchange.
+ *
+ * returns NULL = rrq not found in the phba->active_rrq_list.
+ *         rrq = rrq for this xri and target.
+ **/
+struct lpfc_node_rrq *
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport && rrq->xritag == xri &&
+				rrq->nlp_DID == did){
+			list_del(&rrq->list);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			return rrq;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return NULL;
+}
+
+/**
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
+ * @vport: Pointer to vport context object.
+ * @ndlp: Pointer to the lpfc_node_list structure.
+ * If ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
+ **/
+void
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+	LIST_HEAD(rrq_list);
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	if (!ndlp) {
+		lpfc_sli4_vport_delete_els_xri_aborted(vport);
+		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
+	}
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+		if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
+			list_move(&rrq->list, &rrq_list);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+		list_del(&rrq->list);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
+}
+
+/**
+ * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
+ * @phba: Pointer to HBA context object.
+ *
+ * Remove all rrqs from the phba->active_rrq_list and free them by
+ * calling lpfc_clr_rrq_active.
+ *
+ **/
+void
+lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+	LIST_HEAD(rrq_list);
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov * 2);
+	list_splice_init(&phba->active_rrq_list, &rrq_list);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+		list_del(&rrq->list);
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+}
+
+
+/**
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Targets nodelist pointer for this exchange.
+ * @xritag: the xri in the bitmap to test.
+ *
+ * This function is called with the hbalock held. This function
+ * returns 0 = rrq not active for this xri
+ *         1 = rrq is active for this xri.
+ **/
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	if (!ndlp)
+		return 0;
+	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function takes the hbalock.
+ * The active bit is always set in the active rrq xri_bitmap even
+ * if there is no slot available for the other rrq information.
+ *
+ * returns 0 if the rrq was activated for this xri,
+ *         < 0 if there is no memory or the ndlp is invalid.
+ **/
+int
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	unsigned long iflags;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	if (!ndlp)
+		return -EINVAL;
+
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+		goto out;
+
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (!rrq) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+				" DID:0x%x Send:%d\n",
+				xritag, rxid, ndlp->nlp_DID, send_rrq);
+		return -EINVAL;
+	}
+	rrq->send_rrq = send_rrq;
+	rrq->xritag = xritag;
+	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+	rrq->ndlp = ndlp;
+	rrq->nlp_DID = ndlp->nlp_DID;
+	rrq->vport = ndlp->vport;
+	rrq->rxid = rxid;
+	rrq->send_rrq = send_rrq;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	empty = list_empty(&phba->active_rrq_list);
+	list_add_tail(&rrq->list, &phba->active_rrq_list);
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (empty)
+		lpfc_worker_wake_up(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return 0;
+out:
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, ndlp->nlp_DID, send_rrq);
+	return -EINVAL;
+}
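+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the per-node xri bitmap used by lpfc_set_rrq_active() and
+ * lpfc_test_rrq_active(): the bit index is the xritag, and setting a
+ * bit that is already set means "nothing more to do".  The plain,
+ * non-atomic bitmap below is an assumption for the example; the driver
+ * itself uses test_and_set_bit()/test_bit().
+ */
+#if 0
+#include <stdio.h>
+
+#define XRI_MAX		4096
+#define BITS_PER_WORD	(8 * sizeof(unsigned long))
+
+static unsigned long xri_bitmap[XRI_MAX / BITS_PER_WORD];
+
+static int ex_test_and_set(unsigned int xri)
+{
+	unsigned long mask = 1UL << (xri % BITS_PER_WORD);
+	unsigned long *word = &xri_bitmap[xri / BITS_PER_WORD];
+	int was_set = (*word & mask) != 0;
+
+	*word |= mask;
+	return was_set;
+}
+
+int main(void)
+{
+	printf("first set: %d\n", ex_test_and_set(100));	/* 0: newly active */
+	printf("second set: %d\n", ex_test_and_set(100));	/* 1: already active */
+	return 0;
+}
+#endif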
+
+/**
+ * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to the iocbq.
+ *
+ * This function is called with the hbalock held. It gets a new driver
+ * sglq object from the sglq list. If the allocation is successful, it
+ * returns a pointer to the newly allocated sglq object, else it
+ * returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+	struct lpfc_sglq *sglq = NULL;
+	struct lpfc_sglq *start_sglq = NULL;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_nodelist *ndlp;
+	int found = 0;
+
+	if (piocbq->iocb_flag & LPFC_IO_FCP) {
+		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+		ndlp = lpfc_cmd->rdata->pnode;
+	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
+			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+		ndlp = piocbq->context_un.ndlp;
+	else
+		ndlp = piocbq->context1;
+
+	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+	start_sglq = sglq;
+	while (!found) {
+		if (!sglq)
+			return NULL;
+		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+			/* This xri has an rrq outstanding for this DID.
+			 * put it back in the list and get another xri.
+			 */
+			list_add_tail(&sglq->list, lpfc_sgl_list);
+			sglq = NULL;
+			list_remove_head(lpfc_sgl_list, sglq,
+						struct lpfc_sglq, list);
+			if (sglq == start_sglq) {
+				sglq = NULL;
+				break;
+			} else
+				continue;
+		}
+		sglq->ndlp = ndlp;
+		found = 1;
+		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+		sglq->state = SGL_ALLOCATED;
+	}
+	return sglq;
+}
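+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the selection done by __lpfc_sli_get_sglq(): an xri whose
+ * RRQ is still outstanding for the target is skipped and the next free
+ * entry is tried, giving up once every candidate has been seen.  The
+ * driver rotates rejected entries to the tail of the free list; this
+ * sketch condenses that into a linear scan, and the blocked[] array is
+ * an assumption standing in for lpfc_test_rrq_active().
+ */
+#if 0
+#include <stdio.h>
+
+static int pick_xri(const int *freelist, int n, const int *blocked)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		if (!blocked[freelist[i]])
+			return freelist[i];	/* first usable xri */
+	return -1;				/* every xri still has an RRQ */
+}
+
+int main(void)
+{
+	int freelist[] = { 7, 8, 9 };
+	int blocked[16] = { 0 };
+
+	blocked[7] = 1;				/* xri 7 has an RRQ pending */
+	printf("picked xri %d\n", pick_xri(freelist, 3, blocked));	/* 8 */
+	return 0;
+}
+#endif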
+
+/**
+ * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq * iocbq = NULL;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	iocbq = __lpfc_sli_get_iocbq(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return iocbq;
+}
+
+/**
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and the physical and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
+ **/
+static void
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_sglq *sglq;
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+	unsigned long iflag = 0;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	if (iocbq->sli4_xritag == NO_XRI)
+		sglq = NULL;
+	else
+		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+
+	if (sglq)  {
+		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+			(sglq->state != SGL_XRI_ABORTED)) {
+			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+					iflag);
+			list_add(&sglq->list,
+				&phba->sli4_hba.lpfc_abts_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_sgl_list_lock, iflag);
+		} else {
+			sglq->state = SGL_FREED;
+			sglq->ndlp = NULL;
+			list_add_tail(&sglq->list,
+				&phba->sli4_hba.lpfc_sgl_list);
+
+			/* Check if TXQ queue needs to be serviced */
+			if (pring->txq_cnt)
+				lpfc_worker_wake_up(phba);
+		}
+	}
+
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_lxritag = NO_XRI;
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
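+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the "clear all volatile fields, keep the header" idiom used
+ * by the release paths above: memset() starting at the offsetof() of
+ * the first volatile member wipes the rest of the object while the
+ * iotag and list linkage are preserved across reuse.  struct
+ * obj_example is an assumption made only for this example.
+ */
+#if 0
+#include <stdio.h>
+#include <string.h>
+#include <stddef.h>
+
+struct obj_example {
+	unsigned short iotag;	/* preserved across reuse */
+	int payload;		/* "volatile" part, cleared on release */
+	int flags;
+};
+
+int main(void)
+{
+	struct obj_example o = { .iotag = 42, .payload = 7, .flags = 3 };
+	size_t start_clean = offsetof(struct obj_example, payload);
+
+	memset((char *)&o + start_clean, 0, sizeof(o) - start_clean);
+	printf("iotag %u payload %d flags %d\n", o.iotag, o.payload, o.flags);
+	return 0;
+}
+#endif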
+
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	phba->__lpfc_sli_release_iocbq(phba, iocbq);
+	phba->iocb_cnt--;
+}
+
+/**
+ * lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with no lock held to release the iocb to
+ * iocb pool.
+ **/
+void
+lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	unsigned long iflags;
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_sli_release_iocbq(phba, iocbq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
+ * @phba: Pointer to HBA context object.
+ * @iocblist: List of IOCBs.
+ * @ulpstatus: ULP status in IOCB command field.
+ * @ulpWord4: ULP word-4 in IOCB command field.
+ *
+ * This function is called with a list of IOCBs to cancel. It cancels each IOCB
+ * on the list by invoking the completion callback function associated with the
+ * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
+ * fields.
+ **/
+void
+lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
+		      uint32_t ulpstatus, uint32_t ulpWord4)
+{
+	struct lpfc_iocbq *piocb;
+
+	while (!list_empty(iocblist)) {
+		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
+
+		if (!piocb->iocb_cmpl)
+			lpfc_sli_release_iocbq(phba, piocb);
+		else {
+			piocb->iocb.ulpStatus = ulpstatus;
+			piocb->iocb.un.ulpWord[4] = ulpWord4;
+			(piocb->iocb_cmpl) (phba, piocb, piocb);
+		}
+	}
+	return;
+}
+
+/**
+ * lpfc_sli_iocb_cmd_type - Get the iocb type
+ * @iocb_cmnd: iocb command code.
+ *
+ * This function is called by ring event handler function to get the iocb type.
+ * This function translates the iocb command to an iocb command type used to
+ * decide the final disposition of each completed IOCB.
+ * The function returns
+ * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
+ * LPFC_SOL_IOCB     if it is a solicited iocb completion
+ * LPFC_ABORT_IOCB   if it is an abort iocb
+ * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
+ *
+ * The caller is not required to hold any lock.
+ **/
+static lpfc_iocb_type
+lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
+{
+	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
+
+	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
+		return 0;
+
+	switch (iocb_cmnd) {
+	case CMD_XMIT_SEQUENCE_CR:
+	case CMD_XMIT_SEQUENCE_CX:
+	case CMD_XMIT_BCAST_CN:
+	case CMD_XMIT_BCAST_CX:
+	case CMD_ELS_REQUEST_CR:
+	case CMD_ELS_REQUEST_CX:
+	case CMD_CREATE_XRI_CR:
+	case CMD_CREATE_XRI_CX:
+	case CMD_GET_RPI_CN:
+	case CMD_XMIT_ELS_RSP_CX:
+	case CMD_GET_RPI_CR:
+	case CMD_FCP_IWRITE_CR:
+	case CMD_FCP_IWRITE_CX:
+	case CMD_FCP_IREAD_CR:
+	case CMD_FCP_IREAD_CX:
+	case CMD_FCP_ICMND_CR:
+	case CMD_FCP_ICMND_CX:
+	case CMD_FCP_TSEND_CX:
+	case CMD_FCP_TRSP_CX:
+	case CMD_FCP_TRECEIVE_CX:
+	case CMD_FCP_AUTO_TRSP_CX:
+	case CMD_ADAPTER_MSG:
+	case CMD_ADAPTER_DUMP:
+	case CMD_XMIT_SEQUENCE64_CR:
+	case CMD_XMIT_SEQUENCE64_CX:
+	case CMD_XMIT_BCAST64_CN:
+	case CMD_XMIT_BCAST64_CX:
+	case CMD_ELS_REQUEST64_CR:
+	case CMD_ELS_REQUEST64_CX:
+	case CMD_FCP_IWRITE64_CR:
+	case CMD_FCP_IWRITE64_CX:
+	case CMD_FCP_IREAD64_CR:
+	case CMD_FCP_IREAD64_CX:
+	case CMD_FCP_ICMND64_CR:
+	case CMD_FCP_ICMND64_CX:
+	case CMD_FCP_TSEND64_CX:
+	case CMD_FCP_TRSP64_CX:
+	case CMD_FCP_TRECEIVE64_CX:
+	case CMD_GEN_REQUEST64_CR:
+	case CMD_GEN_REQUEST64_CX:
+	case CMD_XMIT_ELS_RSP64_CX:
+	case DSSCMD_IWRITE64_CR:
+	case DSSCMD_IWRITE64_CX:
+	case DSSCMD_IREAD64_CR:
+	case DSSCMD_IREAD64_CX:
+		type = LPFC_SOL_IOCB;
+		break;
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+	case CMD_CLOSE_XRI_CN:
+	case CMD_CLOSE_XRI_CX:
+	case CMD_XRI_ABORTED_CX:
+	case CMD_ABORT_MXRI64_CN:
+	case CMD_XMIT_BLS_RSP64_CX:
+		type = LPFC_ABORT_IOCB;
+		break;
+	case CMD_RCV_SEQUENCE_CX:
+	case CMD_RCV_ELS_REQ_CX:
+	case CMD_RCV_SEQUENCE64_CX:
+	case CMD_RCV_ELS_REQ64_CX:
+	case CMD_ASYNC_STATUS:
+	case CMD_IOCB_RCV_SEQ64_CX:
+	case CMD_IOCB_RCV_ELS64_CX:
+	case CMD_IOCB_RCV_CONT64_CX:
+	case CMD_IOCB_RET_XRI64_CX:
+		type = LPFC_UNSOL_IOCB;
+		break;
+	case CMD_IOCB_XMIT_MSEQ64_CR:
+	case CMD_IOCB_XMIT_MSEQ64_CX:
+	case CMD_IOCB_RCV_SEQ_LIST64_CX:
+	case CMD_IOCB_RCV_ELS_LIST64_CX:
+	case CMD_IOCB_CLOSE_EXTENDED_CN:
+	case CMD_IOCB_ABORT_EXTENDED_CN:
+	case CMD_IOCB_RET_HBQE64_CN:
+	case CMD_IOCB_FCP_IBIDIR64_CR:
+	case CMD_IOCB_FCP_IBIDIR64_CX:
+	case CMD_IOCB_FCP_ITASKMGT64_CX:
+	case CMD_IOCB_LOGENTRY_CN:
+	case CMD_IOCB_LOGENTRY_ASYNC_CN:
+		printk("%s - Unhandled SLI-3 Command x%x\n",
+				__func__, iocb_cmnd);
+		type = LPFC_UNKNOWN_IOCB;
+		break;
+	default:
+		type = LPFC_UNKNOWN_IOCB;
+		break;
+	}
+
+	return type;
+}
+
+/**
+ * lpfc_sli_ring_map - Issue config_ring mbox for all rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from SLI initialization code
+ * to configure every ring of the HBA's SLI interface. The
+ * caller is not required to hold any lock. This function issues
+ * a config_ring mailbox command for each ring.
+ * This function returns zero if successful else returns a negative
+ * error code.
+ **/
+static int
+lpfc_sli_ring_map(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *pmbox;
+	int i, rc, ret = 0;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb)
+		return -ENOMEM;
+	pmbox = &pmb->u.mb;
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+	for (i = 0; i < psli->num_rings; i++) {
+		lpfc_config_ring(phba, i, pmb);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0446 Adapter failed to init (%d), "
+					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
+					"ring %d\n",
+					rc, pmbox->mbxCommand,
+					pmbox->mbxStatus, i);
+			phba->link_state = LPFC_HBA_ERROR;
+			ret = -ENXIO;
+			break;
+		}
+	}
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return ret;
+}
+
+/**
+ * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to the driver iocb object.
+ *
+ * This function is called with hbalock held. The function adds the
+ * new iocb to txcmplq of the given ring. This function always returns
+ * 0. If this function is called for ELS ring, this function checks if
+ * there is a vport associated with the ELS command. This function also
+ * starts els_tmofunc timer if this is an ELS command.
+ **/
+static int
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *piocb)
+{
+	list_add_tail(&piocb->list, &pring->txcmplq);
+	piocb->iocb_flag |= LPFC_IO_ON_Q;
+	pring->txcmplq_cnt++;
+	if (pring->txcmplq_cnt > pring->txcmplq_max)
+		pring->txcmplq_max = pring->txcmplq_cnt;
+
+	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
+	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+		if (!piocb->vport)
+			BUG();
+		else
+			mod_timer(&piocb->vport->els_tmofunc,
+				  jiffies + HZ * (phba->fc_ratov << 1));
+	}
+
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_ringtx_get - Get first element of the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to get next
+ * iocb in txq of the given ring. If there is any iocb in
+ * the txq, the function returns first iocb in the list after
+ * removing the iocb from the list, else it returns NULL.
+ **/
+struct lpfc_iocbq *
+lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_iocbq *cmd_iocb;
+
+	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
+	if (cmd_iocb != NULL)
+		pring->txq_cnt--;
+	return cmd_iocb;
+}
+
+/**
+ * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held and the caller must post the
+ * iocb without releasing the lock. If the caller releases the lock,
+ * iocb slot returned by the function is not guaranteed to be available.
+ * The function returns pointer to the next available iocb slot if there
+ * is available slot in the ring, else it returns NULL.
+ * If the get index of the ring is ahead of the put index, the function
+ * will post an error attention event to the worker thread to take the
+ * HBA to offline state.
+ **/
+static IOCB_t *
+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	uint32_t  max_cmd_idx = pring->numCiocb;
+	if ((pring->next_cmdidx == pring->cmdidx) &&
+	   (++pring->next_cmdidx >= max_cmd_idx))
+		pring->next_cmdidx = 0;
+
+	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+
+		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+
+		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0315 Ring %d issue: portCmdGet %d "
+					"is bigger than cmd ring %d\n",
+					pring->ringno,
+					pring->local_getidx, max_cmd_idx);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			/*
+			 * All error attention handlers are posted to
+			 * worker thread
+			 */
+			phba->work_ha |= HA_ERATT;
+			phba->work_hs = HS_FFER3;
+
+			lpfc_worker_wake_up(phba);
+
+			return NULL;
+		}
+
+		if (pring->local_getidx == pring->next_cmdidx)
+			return NULL;
+	}
+
+	return lpfc_cmd_iocb(phba, pring);
+}
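+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the circular ring arithmetic behind
+ * lpfc_sli_next_iocb_slot(): the put index advances modulo the ring
+ * size, and the ring is considered full when advancing it would land on
+ * the consumer's get index.  Plain integers stand in for the ring
+ * bookkeeping fields here.
+ */
+#if 0
+#include <stdio.h>
+
+static int ring_slot_available(unsigned int putidx, unsigned int getidx,
+			       unsigned int entries)
+{
+	unsigned int next = (putidx + 1) % entries;
+
+	return next != getidx;		/* equal means the ring is full */
+}
+
+int main(void)
+{
+	/* 8-entry ring, producer at 6, consumer at 7: no room left */
+	printf("available: %d\n", ring_slot_available(6, 7, 8));	/* 0 */
+	printf("available: %d\n", ring_slot_available(6, 0, 8));	/* 1 */
+	return 0;
+}
+#endif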
+
+/**
+ * lpfc_sli_next_iotag - Get an iotag for the iocb
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function gets an iotag for the iocb. If there is no unused iotag and
+ * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
+ * array and assigns a new iotag.
+ * The function returns the allocated iotag if successful, else returns zero.
+ * Zero is not a valid iotag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_iocbq **new_arr;
+	struct lpfc_iocbq **old_arr;
+	size_t new_len;
+	struct lpfc_sli *psli = &phba->sli;
+	uint16_t iotag;
+
+	spin_lock_irq(&phba->hbalock);
+	iotag = psli->last_iotag;
+	if (++iotag < psli->iocbq_lookup_len) {
+		psli->last_iotag = iotag;
+		psli->iocbq_lookup[iotag] = iocbq;
+		spin_unlock_irq(&phba->hbalock);
+		iocbq->iotag = iotag;
+		return iotag;
+	} else if (psli->iocbq_lookup_len < (0xffff
+					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
+		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
+		spin_unlock_irq(&phba->hbalock);
+		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
+				  GFP_KERNEL);
+		if (new_arr) {
+			spin_lock_irq(&phba->hbalock);
+			old_arr = psli->iocbq_lookup;
+			if (new_len <= psli->iocbq_lookup_len) {
+				/* highly improbable case */
+				kfree(new_arr);
+				iotag = psli->last_iotag;
+				if (++iotag < psli->iocbq_lookup_len) {
+					psli->last_iotag = iotag;
+					psli->iocbq_lookup[iotag] = iocbq;
+					spin_unlock_irq(&phba->hbalock);
+					iocbq->iotag = iotag;
+					return iotag;
+				}
+				spin_unlock_irq(&phba->hbalock);
+				return 0;
+			}
+			if (psli->iocbq_lookup)
+				memcpy(new_arr, old_arr,
+				       ((psli->last_iotag  + 1) *
+					sizeof (struct lpfc_iocbq *)));
+			psli->iocbq_lookup = new_arr;
+			psli->iocbq_lookup_len = new_len;
+			psli->last_iotag = iotag;
+			psli->iocbq_lookup[iotag] = iocbq;
+			spin_unlock_irq(&phba->hbalock);
+			iocbq->iotag = iotag;
+			kfree(old_arr);
+			return iotag;
+		}
+	} else
+		spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
+			psli->last_iotag);
+
+	return 0;
+}
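+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the grow-on-demand lookup table in lpfc_sli_next_iotag():
+ * the iotag is a direct index into an array of pointers, and when the
+ * next iotag would fall off the end the array is reallocated with a
+ * fixed increment and the old entries copied over.  The increment,
+ * types and lack of locking below are assumptions for the example.
+ */
+#if 0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define LOOKUP_INCREMENT 1024
+
+static void **lookup;
+static size_t lookup_len;
+static unsigned int last_iotag;
+
+static unsigned int next_iotag(void *obj)
+{
+	unsigned int iotag = last_iotag + 1;
+
+	if (iotag >= lookup_len) {
+		size_t new_len = lookup_len + LOOKUP_INCREMENT;
+		void **new_arr = calloc(new_len, sizeof(*new_arr));
+
+		if (!new_arr)
+			return 0;	/* 0 is never a valid iotag */
+		if (lookup)
+			memcpy(new_arr, lookup,
+			       (last_iotag + 1) * sizeof(*new_arr));
+		free(lookup);
+		lookup = new_arr;
+		lookup_len = new_len;
+	}
+	last_iotag = iotag;
+	lookup[iotag] = obj;
+	return iotag;
+}
+
+int main(void)
+{
+	int dummy;
+
+	printf("iotag %u\n", next_iotag(&dummy));	/* 1 */
+	printf("iotag %u\n", next_iotag(&dummy));	/* 2 */
+	return 0;
+}
+#endif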
+
+/**
+ * lpfc_sli_submit_iocb - Submit an iocb to the firmware
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocb: Pointer to iocb slot in the ring.
+ * @nextiocb: Pointer to driver iocb object which needs to be
+ *            posted to firmware.
+ *
+ * This function is called with hbalock held to post a new iocb to
+ * the firmware. This function copies the new iocb to ring iocb slot and
+ * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * a completion call back for this iocb else the function will free the
+ * iocb object.
+ **/
+static void
+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
+{
+	/*
+	 * Set up an iotag
+	 */
+	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		lpfc_debugfs_slow_ring_trc(phba,
+			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
+			*(((uint32_t *) &nextiocb->iocb) + 4),
+			*(((uint32_t *) &nextiocb->iocb) + 6),
+			*(((uint32_t *) &nextiocb->iocb) + 7));
+	}
+
+	/*
+	 * Issue iocb command to adapter
+	 */
+	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
+	wmb();
+	pring->stats.iocb_cmd++;
+
+	/*
+	 * If there is no completion routine to call, we can release the
+	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
+	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+	 */
+	if (nextiocb->iocb_cmpl)
+		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
+	else
+		__lpfc_sli_release_iocbq(phba, nextiocb);
+
+	/*
+	 * Let the HBA know what IOCB slot will be the next one the
+	 * driver will put a command into.
+	 */
+	pring->cmdidx = pring->next_cmdidx;
+	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+}
+
+/**
+ * lpfc_sli_update_full_ring - Update the chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * The caller is not required to hold any lock for calling this function.
+ * This function updates the chip attention bits for the ring to inform the
+ * firmware that there is pending work to be done for this ring and requests an
+ * interrupt when there is space available in the ring. This function is
+ * called when the driver is unable to post more iocbs to the ring due
+ * to unavailability of space in the ring.
+ **/
+static void
+lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	int ringno = pring->ringno;
+
+	pring->flag |= LPFC_CALL_RING_AVAILABLE;
+
+	wmb();
+
+	/*
+	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
+	 * The HBA will tell us when an IOCB entry is available.
+	 */
+	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
+	readl(phba->CAregaddr); /* flush */
+
+	pring->stats.iocb_cmd_full++;
+}
+
+/**
+ * lpfc_sli_update_ring - Update chip attention register
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function updates the chip attention register bit for the
+ * given ring to inform HBA that there is more work to be done
+ * in this ring. The caller is not required to hold any lock.
+ **/
+static void
+lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	int ringno = pring->ringno;
+
+	/*
+	 * Tell the HBA that there is work to do in this ring.
+	 */
+	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
+		wmb();
+		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+	}
+}
+
+/**
+ * lpfc_sli_resume_iocb - Process iocbs in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to post pending iocbs
+ * in the txq to the firmware. This function is called when driver
+ * detects space available in the ring.
+ **/
+static void
+lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	IOCB_t *iocb;
+	struct lpfc_iocbq *nextiocb;
+
+	/*
+	 * Check to see if:
+	 *  (a) there is anything on the txq to send
+	 *  (b) link is up
+	 *  (c) link attention events can be processed (fcp ring only)
+	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
+	 */
+	if (pring->txq_cnt &&
+	    lpfc_is_link_up(phba) &&
+	    (pring->ringno != phba->sli.fcp_ring ||
+	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
+
+		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
+			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+		if (iocb)
+			lpfc_sli_update_ring(phba, pring);
+		else
+			lpfc_sli_update_full_ring(phba, pring);
+	}
+
+	return;
+}
+
+/**
+ * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function is called with hbalock held to get the next
+ * available slot for the given HBQ. If there is free slot
+ * available for the HBQ it will return pointer to the next available
+ * HBQ entry else it will return NULL.
+ **/
+static struct lpfc_hbq_entry *
+lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
+{
+	struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
+	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
+		hbqp->next_hbqPutIdx = 0;
+
+	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
+		uint32_t raw_index = phba->hbq_get[hbqno];
+		uint32_t getidx = le32_to_cpu(raw_index);
+
+		hbqp->local_hbqGetIdx = getidx;
+
+		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_SLI | LOG_VPORT,
+					"1802 HBQ %d: local_hbqGetIdx "
+					"%u is > than hbqp->entry_count %u\n",
+					hbqno, hbqp->local_hbqGetIdx,
+					hbqp->entry_count);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			return NULL;
+		}
+
+		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
+			return NULL;
+	}
+
+	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
+			hbqp->hbqPutIdx;
+}
+
+/**
+ * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held to free all the
+ * hbq buffers while uninitializing the SLI interface. It also
+ * frees the HBQ buffers returned by the firmware but not yet
+ * processed by the upper layers.
+ **/
+void
+lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
+{
+	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+	struct hbq_dmabuf *hbq_buf;
+	unsigned long flags;
+	int i, hbq_count;
+	uint32_t hbqno;
+
+	hbq_count = lpfc_sli_hbq_count();
+	/* Return all memory used by all HBQs */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	for (i = 0; i < hbq_count; ++i) {
+		list_for_each_entry_safe(dmabuf, next_dmabuf,
+				&phba->hbqs[i].hbq_buffer_list, list) {
+			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+			list_del(&hbq_buf->dbuf.list);
+			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
+		}
+		phba->hbqs[i].buffer_count = 0;
+	}
+	/* Return all HBQ buffers that are in-flight */
+	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
+				 list) {
+		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+		list_del(&hbq_buf->dbuf.list);
+		if (hbq_buf->tag == -1) {
+			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+				(phba, hbq_buf);
+		} else {
+			hbqno = hbq_buf->tag >> 16;
+			if (hbqno >= LPFC_MAX_HBQS)
+				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
+					(phba, hbq_buf);
+			else
+				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
+					hbq_buf);
+		}
+	}
+
+	/* Mark the HBQs not in use */
+	phba->hbq_in_use = 0;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+}
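+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of how the hbq code above recovers the outer hbq_dmabuf from
+ * its embedded dmabuf member: container_of() is simply pointer
+ * arithmetic with offsetof().  The simplified structures below are
+ * stand-ins for this example only.
+ */
+#if 0
+#include <stdio.h>
+#include <stddef.h>
+
+struct dbuf_ex { void *virt; };
+struct hbq_ex { int tag; struct dbuf_ex dbuf; };
+
+#define container_of_ex(ptr, type, member) \
+	((type *)((char *)(ptr) - offsetof(type, member)))
+
+int main(void)
+{
+	struct hbq_ex h = { .tag = 0x10001 };
+	struct dbuf_ex *d = &h.dbuf;		/* what the list hands back */
+	struct hbq_ex *back = container_of_ex(d, struct hbq_ex, dbuf);
+
+	printf("tag 0x%x\n", back->tag);	/* 0x10001 */
+	return 0;
+}
+#endif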
+
+/**
+ * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an
+ * hbq buffer to the firmware. If the function finds an empty
+ * slot in the HBQ, it will post the buffer. The function will return
+ * zero if it successfully posts the buffer, else it will return
+ * an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
+			 struct hbq_dmabuf *hbq_buf)
+{
+	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function will return zero if
+ * it successfully posts the buffer, else it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	struct lpfc_hbq_entry *hbqe;
+	dma_addr_t physaddr = hbq_buf->dbuf.phys;
+
+	/* Get next HBQ entry slot to use */
+	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
+	if (hbqe) {
+		struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
+		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
+		hbqe->bde.tus.f.bdeFlags = 0;
+		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+				/* Sync SLIM */
+		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
+		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
+				/* flush */
+		readl(phba->hbq_put + hbqno);
+		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
+		return 0;
+	} else
+		return -ENOMEM;
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+			    struct hbq_dmabuf *hbq_buf)
+{
+	int rc;
+	struct lpfc_rqe hrqe;
+	struct lpfc_rqe drqe;
+
+	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+			      &hrqe, &drqe);
+	if (rc < 0)
+		return rc;
+	hbq_buf->tag = rc;
+	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+	return 0;
+}
+
+/* HBQ for ELS and CT traffic. */
+static struct lpfc_hbq_init lpfc_els_hbq = {
+	.rn = 1,
+	.entry_count = 256,
+	.mask_count = 0,
+	.profile = 0,
+	.ring_mask = (1 << LPFC_ELS_RING),
+	.buffer_count = 0,
+	.init_count = 40,
+	.add_count = 40,
+};
+
+/* HBQ for the extra ring if needed */
+static struct lpfc_hbq_init lpfc_extra_hbq = {
+	.rn = 1,
+	.entry_count = 200,
+	.mask_count = 0,
+	.profile = 0,
+	.ring_mask = (1 << LPFC_EXTRA_RING),
+	.buffer_count = 0,
+	.init_count = 0,
+	.add_count = 5,
+};
+
+/* Array of HBQs */
+struct lpfc_hbq_init *lpfc_hbq_defs[] = {
+	&lpfc_els_hbq,
+	&lpfc_extra_hbq,
+};
+
+/**
+ * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @count: Number of HBQ buffers to be posted.
+ *
+ * This function is called with no lock held to post more hbq buffers to the
+ * given HBQ. The function returns the number of HBQ buffers successfully
+ * posted.
+ **/
+static int
+lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
+{
+	uint32_t i, posted = 0;
+	unsigned long flags;
+	struct hbq_dmabuf *hbq_buffer;
+	LIST_HEAD(hbq_buf_list);
+	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
+		return 0;
+
+	if ((phba->hbqs[hbqno].buffer_count + count) >
+	    lpfc_hbq_defs[hbqno]->entry_count)
+		count = lpfc_hbq_defs[hbqno]->entry_count -
+					phba->hbqs[hbqno].buffer_count;
+	if (!count)
+		return 0;
+	/* Allocate HBQ entries */
+	for (i = 0; i < count; i++) {
+		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
+		if (!hbq_buffer)
+			break;
+		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
+	}
+	/* Check whether HBQ is still in use */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	if (!phba->hbq_in_use)
+		goto err;
+	while (!list_empty(&hbq_buf_list)) {
+		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+				 dbuf.list);
+		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
+				      (hbqno << 16));
+		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+			phba->hbqs[hbqno].buffer_count++;
+			posted++;
+		} else
+			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return posted;
+err:
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	while (!list_empty(&hbq_buf_list)) {
+		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+				 dbuf.list);
+		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+	return 0;
+}
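+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the buffer tag encoding used above: the HBQ number occupies
+ * the upper 16 bits and a per-HBQ sequence number the lower 16, so the
+ * owning HBQ can later be recovered with tag >> 16 (as done by
+ * lpfc_sli_hbqbuf_find() and lpfc_sli_hbqbuf_free_all()).
+ */
+#if 0
+#include <stdio.h>
+#include <stdint.h>
+
+static uint32_t make_tag(uint32_t hbqno, uint32_t count)
+{
+	return count | (hbqno << 16);
+}
+
+int main(void)
+{
+	uint32_t tag = make_tag(1, 37);		/* extra-ring HBQ, buffer 37 */
+
+	printf("hbqno %u count %u\n", tag >> 16, tag & 0xffff);
+	return 0;
+}
+#endif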
+
+/**
+ * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ number.
+ *
+ * This function posts more buffers to the HBQ. This function
+ * is called with no lock held. The function returns the number of HBQ entries
+ * successfully allocated.
+ **/
+int
+lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return 0;
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					 lpfc_hbq_defs[qno]->add_count);
+}
+
+/**
+ * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
+ * @phba: Pointer to HBA context object.
+ * @qno:  HBQ queue number.
+ *
+ * This function is called from SLI initialization code path with
+ * no lock held to post initial HBQ buffers to firmware. The
+ * function returns the number of HBQ entries successfully allocated.
+ **/
+static int
+lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					lpfc_hbq_defs[qno]->entry_count);
+	else
+		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+					 lpfc_hbq_defs[qno]->init_count);
+}
+
+/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
+ * @rb_list: Pointer to the hbq buffer list.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+	struct lpfc_dmabuf *d_buf;
+
+	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+	if (!d_buf)
+		return NULL;
+	return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
+/**
+ * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag of the hbq buffer.
+ *
+ * This function is called with the hbalock held. This function searches
+ * for the hbq buffer associated with the given tag in the hbq buffer
+ * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
+ * it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
+{
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *hbq_buf;
+	uint32_t hbqno;
+
+	hbqno = tag >> 16;
+	if (hbqno >= LPFC_MAX_HBQS)
+		return NULL;
+
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
+		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		if (hbq_buf->tag == tag) {
+			spin_unlock_irq(&phba->hbalock);
+			return hbq_buf;
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+			"1803 Bad hbq tag. Data: x%x x%x\n",
+			tag, phba->hbqs[tag >> 16].buffer_count);
+	return NULL;
+}
+
+/**
+ * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
+ * @phba: Pointer to HBA context object.
+ * @hbq_buffer: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held. This function gives back
+ * the hbq buffer to the firmware. If the HBQ does not have space to
+ * post the buffer, it will free the buffer.
+ **/
+void
+lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
+{
+	uint32_t hbqno;
+
+	if (hbq_buffer) {
+		hbqno = hbq_buffer->tag >> 16;
+		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
+			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+	}
+}
+
+/**
+ * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox command
+ * @mbxCommand: mailbox command code.
+ *
+ * This function is called by the mailbox event handler function to verify
+ * that the completed mailbox command is a legitimate mailbox command. If the
+ * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
+ * and the mailbox event handler will take the HBA offline.
+ **/
+static int
+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
+{
+	uint8_t ret;
+
+	switch (mbxCommand) {
+	case MBX_LOAD_SM:
+	case MBX_READ_NV:
+	case MBX_WRITE_NV:
+	case MBX_WRITE_VPARMS:
+	case MBX_RUN_BIU_DIAG:
+	case MBX_INIT_LINK:
+	case MBX_DOWN_LINK:
+	case MBX_CONFIG_LINK:
+	case MBX_CONFIG_RING:
+	case MBX_RESET_RING:
+	case MBX_READ_CONFIG:
+	case MBX_READ_RCONFIG:
+	case MBX_READ_SPARM:
+	case MBX_READ_STATUS:
+	case MBX_READ_RPI:
+	case MBX_READ_XRI:
+	case MBX_READ_REV:
+	case MBX_READ_LNK_STAT:
+	case MBX_REG_LOGIN:
+	case MBX_UNREG_LOGIN:
+	case MBX_CLEAR_LA:
+	case MBX_DUMP_MEMORY:
+	case MBX_DUMP_CONTEXT:
+	case MBX_RUN_DIAGS:
+	case MBX_RESTART:
+	case MBX_UPDATE_CFG:
+	case MBX_DOWN_LOAD:
+	case MBX_DEL_LD_ENTRY:
+	case MBX_RUN_PROGRAM:
+	case MBX_SET_MASK:
+	case MBX_SET_VARIABLE:
+	case MBX_UNREG_D_ID:
+	case MBX_KILL_BOARD:
+	case MBX_CONFIG_FARP:
+	case MBX_BEACON:
+	case MBX_LOAD_AREA:
+	case MBX_RUN_BIU_DIAG64:
+	case MBX_CONFIG_PORT:
+	case MBX_READ_SPARM64:
+	case MBX_READ_RPI64:
+	case MBX_REG_LOGIN64:
+	case MBX_READ_TOPOLOGY:
+	case MBX_WRITE_WWN:
+	case MBX_SET_DEBUG:
+	case MBX_LOAD_EXP_ROM:
+	case MBX_ASYNCEVT_ENABLE:
+	case MBX_REG_VPI:
+	case MBX_UNREG_VPI:
+	case MBX_HEARTBEAT:
+	case MBX_PORT_CAPABILITIES:
+	case MBX_PORT_IOV_CONTROL:
+	case MBX_SLI4_CONFIG:
+	case MBX_SLI4_REQ_FTRS:
+	case MBX_REG_FCFI:
+	case MBX_UNREG_FCFI:
+	case MBX_REG_VFI:
+	case MBX_UNREG_VFI:
+	case MBX_INIT_VPI:
+	case MBX_INIT_VFI:
+	case MBX_RESUME_RPI:
+	case MBX_READ_EVENT_LOG_STATUS:
+	case MBX_READ_EVENT_LOG:
+	case MBX_SECURITY_MGMT:
+	case MBX_AUTH_PORT:
+		ret = mbxCommand;
+		break;
+	default:
+		ret = MBX_SHUTDOWN;
+		break;
+	}
+	return ret;
+}
+
+/**
+ * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler function for mailbox commands issued from
+ * the lpfc_sli_issue_mbox_wait function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up the thread waiting on the wait queue pointed to by context1
+ * of the mailbox.
+ **/
+void
+lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+	wait_queue_head_t *pdone_q;
+	unsigned long drvr_flag;
+
+	/*
+	 * If pdone_q is empty, the driver thread gave up waiting and
+	 * continued running.
+	 */
+	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	pdone_q = (wait_queue_head_t *) pmboxq->context1;
+	if (pdone_q)
+		wake_up_interruptible(pdone_q);
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+	return;
+}
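+
+/*
+ * Illustrative user-space analogy (not part of the driver build,
+ * compiled out with #if 0) for the wake-up above: the completion side
+ * sets a "done" flag under a lock and signals, the issuing side waits
+ * on that flag.  pthreads stand in for the kernel wait queue and the
+ * LPFC_MBX_WAKE flag here; this is only an analogy, not driver code.
+ */
+#if 0
+#include <stdio.h>
+#include <pthread.h>
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
+static int mbox_done;
+
+static void *completion_side(void *arg)
+{
+	pthread_mutex_lock(&lock);
+	mbox_done = 1;			/* analogous to LPFC_MBX_WAKE */
+	pthread_cond_signal(&done_cv);
+	pthread_mutex_unlock(&lock);
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t t;
+
+	pthread_create(&t, NULL, completion_side, NULL);
+	pthread_mutex_lock(&lock);
+	while (!mbox_done)
+		pthread_cond_wait(&done_cv, &lock);
+	pthread_mutex_unlock(&lock);
+	pthread_join(t, NULL);
+	printf("mailbox completed\n");
+	return 0;
+}
+#endif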
+
+
+/**
+ * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the default mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. If the completed command is a REG_LOGIN mailbox command,
+ * this function will issue an UNREG_LOGIN to re-claim the RPI.
+ **/
+void
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	uint16_t rpi, vpi;
+	int rc;
+
+	mp = (struct lpfc_dmabuf *) (pmb->context1);
+
+	if (mp) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+
+	/*
+	 * If a REG_LOGIN succeeded after the node was destroyed or the node
+	 * is in re-discovery, the driver needs to clean up the RPI.
+	 */
+	if (!(phba->pport->load_flag & FC_UNLOADING) &&
+	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+	    !pmb->u.mb.mbxStatus) {
+		rpi = pmb->u.mb.un.varWords[0];
+		vpi = pmb->u.mb.un.varRegLogin.vpi;
+		lpfc_unreg_login(phba, vpi, rpi, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc != MBX_NOT_FINISHED)
+			return;
+	}
+
+	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+		!(phba->pport->load_flag & FC_UNLOADING) &&
+		!pmb->u.mb.mbxStatus) {
+		shost = lpfc_shost_from_vport(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->vpi_state |= LPFC_VPI_REGISTERED;
+		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
+	}
+
+	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+		ndlp = (struct lpfc_nodelist *)pmb->context2;
+		lpfc_nlp_put(ndlp);
+		pmb->context2 = NULL;
+	}
+
+	/* Check security permission status on INIT_LINK mailbox command */
+	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
+	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"2860 SLI authentication is required "
+				"for INIT_LINK but has not done yet\n");
+
+	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+		lpfc_sli4_mbox_cmd_free(phba, pmb);
+	else
+		mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the completed mailbox commands and gives them to upper layers. The interrupt
+ * service routine processes the mailbox completion interrupt, adds completed
+ * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
+ * The worker thread calls lpfc_sli_handle_mb_event, which returns the
+ * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
+ * function returns the mailbox commands to the upper layer by calling the
+ * completion handler function of each mailbox.
+ **/
+int
+lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
+{
+	MAILBOX_t *pmbox;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+	LIST_HEAD(cmplq);
+
+	phba->sli.slistat.mbox_event++;
+
+	/* Get all completed mailbox buffers into the cmplq */
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Get a Mailbox buffer to setup mailbox commands for callback */
+	do {
+		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
+		if (pmb == NULL)
+			break;
+
+		pmbox = &pmb->u.mb;
+
+		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
+			if (pmb->vport) {
+				lpfc_debugfs_disc_trc(pmb->vport,
+					LPFC_DISC_TRC_MBOX_VPORT,
+					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
+					(uint32_t)pmbox->mbxCommand,
+					pmbox->un.varWords[0],
+					pmbox->un.varWords[1]);
+			}
+			else {
+				lpfc_debugfs_disc_trc(phba->pport,
+					LPFC_DISC_TRC_MBOX,
+					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
+					(uint32_t)pmbox->mbxCommand,
+					pmbox->un.varWords[0],
+					pmbox->un.varWords[1]);
+			}
+		}
+
+		/*
+		 * It is a fatal error if an unknown mailbox command completes.
+		 */
+		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
+		    MBX_SHUTDOWN) {
+			/* Unknown mailbox command compl */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):0323 Unknown Mailbox command "
+					"x%x (x%x/x%x) Cmpl\n",
+					pmb->vport ? pmb->vport->vpi : 0,
+					pmbox->mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									pmb),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									pmb));
+			phba->link_state = LPFC_HBA_ERROR;
+			phba->work_hs = HS_FFER3;
+			lpfc_handle_eratt(phba);
+			continue;
+		}
+
+		if (pmbox->mbxStatus) {
+			phba->sli.slistat.mbox_stat_err++;
+			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
+				/* Mbox cmd cmpl error - RETRYing */
+				lpfc_printf_log(phba, KERN_INFO,
+					LOG_MBOX | LOG_SLI,
+					"(%d):0305 Mbox cmd cmpl "
+					"error - RETRYing Data: x%x "
+					"(x%x/x%x) x%x x%x x%x\n",
+					pmb->vport ? pmb->vport->vpi : 0,
+					pmbox->mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									pmb),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									pmb),
+					pmbox->mbxStatus,
+					pmbox->un.varWords[0],
+					pmb->vport->port_state);
+				pmbox->mbxStatus = 0;
+				pmbox->mbxOwner = OWN_HOST;
+				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+				if (rc != MBX_NOT_FINISHED)
+					continue;
+			}
+		}
+
+		/* Mailbox cmd <cmd> Cmpl <cmpl> */
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+				pmb->vport ? pmb->vport->vpi : 0,
+				pmbox->mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, pmb),
+				lpfc_sli_config_mbox_opcode_get(phba, pmb),
+				pmb->mbox_cmpl,
+				*((uint32_t *) pmbox),
+				pmbox->un.varWords[0],
+				pmbox->un.varWords[1],
+				pmbox->un.varWords[2],
+				pmbox->un.varWords[3],
+				pmbox->un.varWords[4],
+				pmbox->un.varWords[5],
+				pmbox->un.varWords[6],
+				pmbox->un.varWords[7]);
+
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	} while (1);
+	return 0;
+}
+
+/**
+ * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: buffer tag.
+ *
+ * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
+ * is set in the tag, the buffer was posted for a particular exchange and
+ * the function will return the buffer without replacing it.
+ * If the buffer is for unsolicited ELS or CT traffic, this function
+ * returns the buffer and also posts another buffer to the firmware.
+ **/
+static struct lpfc_dmabuf *
+lpfc_sli_get_buff(struct lpfc_hba *phba,
+		  struct lpfc_sli_ring *pring,
+		  uint32_t tag)
+{
+	struct hbq_dmabuf *hbq_entry;
+
+	if (tag & QUE_BUFTAG_BIT)
+		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
+	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+	if (!hbq_entry)
+		return NULL;
+	return &hbq_entry->dbuf;
+}
+
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+			 uint32_t fch_type)
+{
+	int i;
+
+	/* Unsolicited Responses */
+	if (pring->prt[0].profile) {
+		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+									saveq);
+		return 1;
+	}
+	/* We must search, based on rctl / type
+	   for the right routine */
+	for (i = 0; i < pring->num_mask; i++) {
+		if ((pring->prt[i].rctl == fch_r_ctl) &&
+		    (pring->prt[i].type == fch_type)) {
+			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+				(pring->prt[i].lpfc_sli_rcv_unsol_event)
+						(phba, pring, saveq);
+			return 1;
+		}
+	}
+	return 0;
+}
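+
+/*
+ * Illustrative sketch (not part of the driver build, compiled out with
+ * #if 0) of the r_ctl/type dispatch performed by
+ * lpfc_complete_unsol_iocb(): the first table entry whose (rctl, type)
+ * pair matches the received frame gets the callback.  The table
+ * contents and handler names below are assumptions for the example.
+ */
+#if 0
+#include <stdio.h>
+
+struct unsol_handler {
+	unsigned int rctl;
+	unsigned int type;
+	void (*handler)(const char *what);
+};
+
+static void els_handler(const char *what) { printf("ELS: %s\n", what); }
+static void ct_handler(const char *what)  { printf("CT: %s\n", what); }
+
+static int dispatch(const struct unsol_handler *tbl, int n,
+		    unsigned int rctl, unsigned int type, const char *what)
+{
+	int i;
+
+	for (i = 0; i < n; i++) {
+		if (tbl[i].rctl == rctl && tbl[i].type == type) {
+			tbl[i].handler(what);
+			return 1;	/* handled */
+		}
+	}
+	return 0;			/* unexpected rctl/type */
+}
+
+int main(void)
+{
+	struct unsol_handler tbl[] = {
+		{ 0x22, 0x01, els_handler },	/* ELS request */
+		{ 0x02, 0x20, ct_handler },	/* CT unsolicited control */
+	};
+
+	return !dispatch(tbl, 2, 0x22, 0x01, "PLOGI");
+}
+#endif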
+
+/**
+ * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the unsolicited iocb.
+ *
+ * This function is called with no lock held by the ring event handler
+ * when there is an unsolicited iocb posted to the response ring by the
+ * firmware. This function gets the buffer associated with the iocbs
+ * and calls the event handler for the ring. This function handles both
+ * qring buffers and hbq buffers.
+ * When the function returns 1, the caller can free the iocb object; otherwise
+ * upper layer functions will free the iocb objects.
+ **/
+static int
+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			    struct lpfc_iocbq *saveq)
+{
+	IOCB_t           * irsp;
+	WORD5            * w5p;
+	uint32_t           Rctl, Type;
+	uint32_t           match;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_dmabuf *dmzbuf;
+
+	match = 0;
+	irsp = &(saveq->iocb);
+
+	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
+		if (pring->lpfc_sli_rcv_async_status)
+			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
+		else
+			lpfc_printf_log(phba,
+					KERN_WARNING,
+					LOG_SLI,
+					"0316 Ring %d handler: unexpected "
+					"ASYNC_STATUS iocb received evt_code "
+					"0x%x\n",
+					pring->ringno,
+					irsp->un.asyncstat.evt_code);
+		return 1;
+	}
+
+	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
+		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+		if (irsp->ulpBdeCount > 0) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+					irsp->un.ulpWord[3]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		if (irsp->ulpBdeCount > 1) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+					irsp->unsli3.sli3Words[3]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		if (irsp->ulpBdeCount > 2) {
+			dmzbuf = lpfc_sli_get_buff(phba, pring,
+				irsp->unsli3.sli3Words[7]);
+			lpfc_in_buf_free(phba, dmzbuf);
+		}
+
+		return 1;
+	}
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		if (irsp->ulpBdeCount != 0) {
+			saveq->context2 = lpfc_sli_get_buff(phba, pring,
+						irsp->un.ulpWord[3]);
+			if (!saveq->context2)
+				lpfc_printf_log(phba,
+					KERN_ERR,
+					LOG_SLI,
+					"0341 Ring %d Cannot find buffer for "
+					"an unsolicited iocb. tag 0x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[3]);
+		}
+		if (irsp->ulpBdeCount == 2) {
+			saveq->context3 = lpfc_sli_get_buff(phba, pring,
+						irsp->unsli3.sli3Words[7]);
+			if (!saveq->context3)
+				lpfc_printf_log(phba,
+					KERN_ERR,
+					LOG_SLI,
+					"0342 Ring %d Cannot find buffer for an"
+					" unsolicited iocb. tag 0x%x\n",
+					pring->ringno,
+					irsp->unsli3.sli3Words[7]);
+		}
+		list_for_each_entry(iocbq, &saveq->list, list) {
+			irsp = &(iocbq->iocb);
+			if (irsp->ulpBdeCount != 0) {
+				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+							irsp->un.ulpWord[3]);
+				if (!iocbq->context2)
+					lpfc_printf_log(phba,
+						KERN_ERR,
+						LOG_SLI,
+						"0343 Ring %d Cannot find "
+						"buffer for an unsolicited iocb"
+						". tag 0x%x\n", pring->ringno,
+						irsp->un.ulpWord[3]);
+			}
+			if (irsp->ulpBdeCount == 2) {
+				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+						irsp->unsli3.sli3Words[7]);
+				if (!iocbq->context3)
+					lpfc_printf_log(phba,
+						KERN_ERR,
+						LOG_SLI,
+						"0344 Ring %d Cannot find "
+						"buffer for an unsolicited "
+						"iocb. tag 0x%x\n",
+						pring->ringno,
+						irsp->unsli3.sli3Words[7]);
+			}
+		}
+	}
+	if (irsp->ulpBdeCount != 0 &&
+	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
+	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
+		int found = 0;
+
+		/* search continue save q for same XRI */
+		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
+			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+				saveq->iocb.unsli3.rcvsli3.ox_id) {
+				list_add_tail(&saveq->list, &iocbq->list);
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			list_add_tail(&saveq->clist,
+				      &pring->iocb_continue_saveq);
+		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
+			list_del_init(&iocbq->clist);
+			saveq = iocbq;
+			irsp = &(saveq->iocb);
+		} else
+			return 0;
+	}
+	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
+	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
+	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
+		Rctl = FC_RCTL_ELS_REQ;
+		Type = FC_TYPE_ELS;
+	} else {
+		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
+		Rctl = w5p->hcsw.Rctl;
+		Type = w5p->hcsw.Type;
+
+		/* Firmware Workaround */
+		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
+			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
+			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+			Rctl = FC_RCTL_ELS_REQ;
+			Type = FC_TYPE_ELS;
+			w5p->hcsw.Rctl = Rctl;
+			w5p->hcsw.Type = Type;
+		}
+	}
+
+	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0313 Ring %d handler: unexpected Rctl x%x "
+				"Type x%x received\n",
+				pring->ringno, Rctl, Type);
+
+	return 1;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @prspiocb: Pointer to response iocb object.
+ *
+ * This function looks up the iocbq_lookup table to get the command iocb
+ * corresponding to the given response iocb using the iotag of the
+ * response iocb. This function is called with the hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
+		      struct lpfc_sli_ring *pring,
+		      struct lpfc_iocbq *prspiocb)
+{
+	struct lpfc_iocbq *cmd_iocb = NULL;
+	uint16_t iotag;
+
+	iotag = prspiocb->iocb.ulpIoTag;
+
+	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+		cmd_iocb = phba->sli.iocbq_lookup[iotag];
+		list_del_init(&cmd_iocb->list);
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+			pring->txcmplq_cnt--;
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+		}
+		return cmd_iocb;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0317 iotag x%x is out of "
+			"range: max iotag x%x wd0 x%x\n",
+			iotag, phba->sli.last_iotag,
+			*(((uint32_t *) &prspiocb->iocb) + 7));
+	return NULL;
+}
+
+/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocbq_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+			     struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+	struct lpfc_iocbq *cmd_iocb;
+
+	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+		cmd_iocb = phba->sli.iocbq_lookup[iotag];
+		list_del_init(&cmd_iocb->list);
+		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+			pring->txcmplq_cnt--;
+		}
+		return cmd_iocb;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0372 iotag x%x is out of range: max iotag (x%x)\n",
+			iotag, phba->sli.last_iotag);
+	return NULL;
+}
+
+/**
+ * lpfc_sli_process_sol_iocb - process solicited iocb completion
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the response iocb to be processed.
+ *
+ * This function is called by the ring event handler for non-fcp
+ * rings when there is a new response iocb in the response ring.
+ * The caller is not required to hold any locks. This function
+ * gets the command iocb associated with the response iocb and
+ * calls the completion handler for the command iocb. If there
+ * is no completion handler, the function will free the resources
+ * associated with command iocb. If the response iocb is for
+ * an already aborted command iocb, the status of the completion
+ * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
+ * This function always returns 1.
+ **/
+static int
+lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			  struct lpfc_iocbq *saveq)
+{
+	struct lpfc_iocbq *cmdiocbp;
+	int rc = 1;
+	unsigned long iflag;
+
+	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	if (cmdiocbp) {
+		if (cmdiocbp->iocb_cmpl) {
+			/*
+			 * If an ELS command failed send an event to mgmt
+			 * application.
+			 */
+			if (saveq->iocb.ulpStatus &&
+			     (pring->ringno == LPFC_ELS_RING) &&
+			     (cmdiocbp->iocb.ulpCommand ==
+				CMD_ELS_REQUEST64_CR))
+				lpfc_send_els_failure_event(phba,
+					cmdiocbp, saveq);
+
+			/*
+			 * Post all ELS completions to the worker thread.
+			 * All other are passed to the completion callback.
+			 */
+			if (pring->ringno == LPFC_ELS_RING) {
+				if ((phba->sli_rev < LPFC_SLI_REV4) &&
+				    (cmdiocbp->iocb_flag &
+							LPFC_DRIVER_ABORTED)) {
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					cmdiocbp->iocb_flag &=
+						~LPFC_DRIVER_ABORTED;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					saveq->iocb.ulpStatus =
+						IOSTAT_LOCAL_REJECT;
+					saveq->iocb.un.ulpWord[4] =
+						IOERR_SLI_ABORTED;
+
+					/* Firmware could still be in progress
+					 * of DMAing payload, so don't free data
+					 * buffer till after a hbeat.
+					 */
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+				}
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					if (saveq->iocb_flag &
+					    LPFC_EXCHANGE_BUSY) {
+						/* Set cmdiocb flag for the
+						 * exchange busy so sgl (xri)
+						 * will not be released until
+						 * the abort xri is received
+						 * from hba.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag |=
+							LPFC_EXCHANGE_BUSY;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+					if (cmdiocbp->iocb_flag &
+					    LPFC_DRIVER_ABORTED) {
+						/*
+						 * Clear LPFC_DRIVER_ABORTED
+						 * bit in case it was driver
+						 * initiated abort.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag &=
+							~LPFC_DRIVER_ABORTED;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						cmdiocbp->iocb.un.ulpWord[4] =
+							IOERR_ABORT_REQUESTED;
+						/*
+						 * For SLI4, irsiocb contains
+						 * NO_XRI in sli_xritag, it
+						 * shall not affect releasing
+						 * sgl (xri) process.
+						 */
+						saveq->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						saveq->iocb.un.ulpWord[4] =
+							IOERR_SLI_ABORTED;
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						saveq->iocb_flag |=
+							LPFC_DELAY_MEM_FREE;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+				}
+			}
+			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+		} else
+			lpfc_sli_release_iocbq(phba, cmdiocbp);
+	} else {
+		/*
+		 * Unknown initiating command based on the response iotag.
+		 * This could be the case on the ELS ring because of
+		 * lpfc_els_abort().
+		 */
+		if (pring->ringno != LPFC_ELS_RING) {
+			/*
+			 * Ring <ringno> handler: unexpected completion IoTag
+			 * <IoTag>
+			 */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					 "0322 Ring %d handler: "
+					 "unexpected completion IoTag x%x "
+					 "Data: x%x x%x x%x x%x\n",
+					 pring->ringno,
+					 saveq->iocb.ulpIoTag,
+					 saveq->iocb.ulpStatus,
+					 saveq->iocb.un.ulpWord[4],
+					 saveq->iocb.ulpCommand,
+					 saveq->iocb.ulpContext);
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called from the iocb ring event handlers when the
+ * put pointer is ahead of the get pointer for a ring. This function signals
+ * an error attention condition to the worker thread, and the worker
+ * thread will transition the HBA to the offline state.
+ **/
+static void
+lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	/*
+	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+	 * rsp ring <portRspMax>
+	 */
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0312 Ring %d handler: portRspPut %d "
+			"is bigger than rsp ring %d\n",
+			pring->ringno, le32_to_cpu(pgp->rspPutInx),
+			pring->numRiocb);
+
+	phba->link_state = LPFC_HBA_ERROR;
+
+	/*
+	 * All error attention handlers are posted to
+	 * worker thread
+	 */
+	phba->work_ha |= HA_ERATT;
+	phba->work_hs = HS_FFER3;
+
+	lpfc_worker_wake_up(phba);
+
+	return;
+}
+
+/**
+ * lpfc_poll_eratt - Error attention polling timer timeout handler
+ * @ptr: Pointer to address of HBA context object.
+ *
+ * This function is invoked by the Error Attention polling timer when the
+ * timer times out. It will check the SLI Error Attention register for
+ * possible attention events. If so, it will post an Error Attention event
+ * and wake up worker thread to process it. Otherwise, it will set up the
+ * Error Attention polling timer for the next poll.
+ **/
+void lpfc_poll_eratt(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	uint32_t eratt = 0;
+
+	phba = (struct lpfc_hba *)ptr;
+
+	/* Check chip HA register for error event */
+	eratt = lpfc_sli_check_eratt(phba);
+
+	if (eratt)
+		/* Tell the worker thread there is work to do */
+		lpfc_worker_wake_up(phba);
+	else
+		/* Restart the timer for next eratt poll */
+		mod_timer(&phba->eratt_poll, jiffies +
+					HZ * LPFC_ERATT_POLL_INTERVAL);
+	return;
+}
+
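+/*
+ * A minimal sketch, not taken from this source, of how the error attention
+ * polling timer is presumably armed against the 3.4-era timer API; the
+ * phba->eratt_poll field, lpfc_poll_eratt() and LPFC_ERATT_POLL_INTERVAL
+ * are the names used above, while the exact setup site is an assumption:
+ *
+ *	init_timer(&phba->eratt_poll);
+ *	phba->eratt_poll.function = lpfc_poll_eratt;
+ *	phba->eratt_poll.data = (unsigned long)phba;
+ *	mod_timer(&phba->eratt_poll,
+ *		  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ */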
+
+/**
+ * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the interrupt context when there is a ring
+ * event for the fcp ring. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
+ * LE bit set. The function will call the completion handler of the command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion. The function will call lpfc_sli_process_unsol_iocb
+ * function if this is an unsolicited iocb.
+ * This routine presumes LPFC_FCP_RING handling and doesn't bother
+ * to check it explicitly.
+ **/
+int
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+				struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
+	IOCB_t *irsp = NULL;
+	IOCB_t *entry = NULL;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	struct lpfc_iocbq rspiocbq;
+	uint32_t status;
+	uint32_t portRspPut, portRspMax;
+	int rc = 1;
+	lpfc_iocb_type type;
+	unsigned long iflag;
+	uint32_t rsp_cmpl = 0;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	pring->stats.iocb_event++;
+
+	/*
+	 * The next available response entry should never exceed the maximum
+	 * entries.  If it does, treat it as an adapter hardware error.
+	 */
+	portRspMax = pring->numRiocb;
+	portRspPut = le32_to_cpu(pgp->rspPutInx);
+	if (unlikely(portRspPut >= portRspMax)) {
+		lpfc_sli_rsp_pointers_error(phba, pring);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	}
+	if (phba->fcp_ring_in_use) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return 1;
+	} else
+		phba->fcp_ring_in_use = 1;
+
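+	/* Order the read of the port's response put index above against the
+	 * response ring entries fetched in the loop below.
+	 */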
+	rmb();
+	while (pring->rspidx != portRspPut) {
+		/*
+		 * Fetch an entry off the ring and copy it into a local data
+		 * structure.  The copy involves a byte-swap since the
+		 * network byte order and pci byte orders are different.
+		 */
+		entry = lpfc_resp_iocb(phba, pring);
+		phba->last_completion_time = jiffies;
+
+		if (++pring->rspidx >= portRspMax)
+			pring->rspidx = 0;
+
+		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
+				      (uint32_t *) &rspiocbq.iocb,
+				      phba->iocb_rsp_size);
+		INIT_LIST_HEAD(&(rspiocbq.list));
+		irsp = &rspiocbq.iocb;
+
+		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
+		pring->stats.iocb_rsp++;
+		rsp_cmpl++;
+
+		if (unlikely(irsp->ulpStatus)) {
+			/*
+			 * If resource errors reported from HBA, reduce
+			 * queuedepths of the SCSI device.
+			 */
+			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+				(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				phba->lpfc_rampdown_queue_depth(phba);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
+
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0336 Rsp Ring %d error: IOCB Data: "
+					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(uint32_t *)&irsp->un1,
+					*((uint32_t *)&irsp->un1 + 1));
+		}
+
+		switch (type) {
+		case LPFC_ABORT_IOCB:
+		case LPFC_SOL_IOCB:
+			/*
+			 * Idle exchange closed via ABTS from port.  No iocb
+			 * resources need to be recovered.
+			 */
+			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
+				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+						"0333 IOCB cmd 0x%x"
+						" processed. Skipping"
+						" completion\n",
+						irsp->ulpCommand);
+				break;
+			}
+
+			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
+							 &rspiocbq);
+			if (unlikely(!cmdiocbq))
+				break;
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			if (cmdiocbq->iocb_cmpl) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
+			break;
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			break;
+		default:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *) irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev),
+					 "lpfc%d: %s\n",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0334 Unknown IOCB command "
+						"Data: x%x, x%x x%x x%x x%x\n",
+						type, irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		/*
+		 * The response IOCB has been processed.  Update the ring
+		 * pointer in SLIM.  If the port response put pointer has not
+		 * been updated, sync the pgp->rspPutInx and fetch the new port
+		 * response put pointer.
+		 */
+		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+
+		if (pring->rspidx == portRspPut)
+			portRspPut = le32_to_cpu(pgp->rspPutInx);
+	}
+
+	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
+		pring->stats.iocb_rsp_full++;
+		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+		writel(status, phba->CAregaddr);
+		readl(phba->CAregaddr);
+	}
+	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+		pring->stats.iocb_cmd_empty++;
+
+		/* Force update of the local copy of cmdGetInx */
+		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		lpfc_sli_resume_iocb(phba, pring);
+
+		if ((pring->lpfc_sli_cmd_available))
+			(pring->lpfc_sli_cmd_available) (phba, pring);
+
+	}
+
+	phba->fcp_ring_in_use = 0;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rc;
+}
+
+/**
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *rspiocbp)
+{
+	struct lpfc_iocbq *saveq;
+	struct lpfc_iocbq *cmdiocbp;
+	struct lpfc_iocbq *next_iocb;
+	IOCB_t *irsp = NULL;
+	uint32_t free_saveq;
+	uint8_t iocb_cmd_type;
+	lpfc_iocb_type type;
+	unsigned long iflag;
+	int rc;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* First add the response iocb to the continueq list */
+	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+	pring->iocb_continueq_cnt++;
+
+	/* Now, determine whether the list is completed for processing */
+	irsp = &rspiocbp->iocb;
+	if (irsp->ulpLe) {
+		/*
+		 * By default, the driver expects to free all resources
+		 * associated with this iocb completion.
+		 */
+		free_saveq = 1;
+		saveq = list_get_first(&pring->iocb_continueq,
+				       struct lpfc_iocbq, list);
+		irsp = &(saveq->iocb);
+		list_del_init(&pring->iocb_continueq);
+		pring->iocb_continueq_cnt = 0;
+
+		pring->stats.iocb_rsp++;
+
+		/*
+		 * If resource errors reported from HBA, reduce
+		 * queuedepths of the SCSI device.
+		 */
+		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			phba->lpfc_rampdown_queue_depth(phba);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+		}
+
+		if (irsp->ulpStatus) {
+			/* Rsp ring <ringno> error: IOCB */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0328 Rsp Ring %d error: "
+					"IOCB Data: "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x "
+					"x%x x%x x%x x%x\n",
+					pring->ringno,
+					irsp->un.ulpWord[0],
+					irsp->un.ulpWord[1],
+					irsp->un.ulpWord[2],
+					irsp->un.ulpWord[3],
+					irsp->un.ulpWord[4],
+					irsp->un.ulpWord[5],
+					*(((uint32_t *) irsp) + 6),
+					*(((uint32_t *) irsp) + 7),
+					*(((uint32_t *) irsp) + 8),
+					*(((uint32_t *) irsp) + 9),
+					*(((uint32_t *) irsp) + 10),
+					*(((uint32_t *) irsp) + 11),
+					*(((uint32_t *) irsp) + 12),
+					*(((uint32_t *) irsp) + 13),
+					*(((uint32_t *) irsp) + 14),
+					*(((uint32_t *) irsp) + 15));
+		}
+
+		/*
+		 * Fetch the IOCB command type and call the correct completion
+		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
+		 * get freed back to the lpfc_iocb_list by the discovery
+		 * kernel thread.
+		 */
+		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+		switch (type) {
+		case LPFC_SOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			break;
+
+		case LPFC_UNSOL_IOCB:
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+			spin_lock_irqsave(&phba->hbalock, iflag);
+			if (!rc)
+				free_saveq = 0;
+			break;
+
+		case LPFC_ABORT_IOCB:
+			cmdiocbp = NULL;
+			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+								 saveq);
+			if (cmdiocbp) {
+				/* Call the specified completion routine */
+				if (cmdiocbp->iocb_cmpl) {
+					spin_unlock_irqrestore(&phba->hbalock,
+							       iflag);
+					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+							      saveq);
+					spin_lock_irqsave(&phba->hbalock,
+							  iflag);
+				} else
+					__lpfc_sli_release_iocbq(phba,
+								 cmdiocbp);
+			}
+			break;
+
+		case LPFC_UNKNOWN_IOCB:
+			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+				char adaptermsg[LPFC_MAX_ADPTMSG];
+				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+				memcpy(&adaptermsg[0], (uint8_t *)irsp,
+				       MAX_MSG_DATA);
+				dev_warn(&((phba->pcidev)->dev),
+					 "lpfc%d: %s\n",
+					 phba->brd_no, adaptermsg);
+			} else {
+				/* Unknown IOCB command */
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0335 Unknown IOCB "
+						"command Data: x%x "
+						"x%x x%x x%x\n",
+						irsp->ulpCommand,
+						irsp->ulpStatus,
+						irsp->ulpIoTag,
+						irsp->ulpContext);
+			}
+			break;
+		}
+
+		if (free_saveq) {
+			list_for_each_entry_safe(rspiocbp, next_iocb,
+						 &saveq->list, list) {
+				list_del(&rspiocbp->list);
+				__lpfc_sli_release_iocbq(phba, rspiocbp);
+			}
+			__lpfc_sli_release_iocbq(phba, saveq);
+		}
+		rspiocbp = NULL;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rspiocbp;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This routine wraps the actual slow_ring event process routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+ **/
+void
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+				struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
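+/*
+ * A minimal sketch, assumed rather than quoted from the setup code, of how
+ * the lpfc_sli_handle_slow_ring_event jump-table entry is presumably filled
+ * in for each SLI revision (the _s3/_s4 handlers are defined below):
+ *
+ *	phba->lpfc_sli_handle_slow_ring_event =
+ *			lpfc_sli_handle_slow_ring_event_s3;	(on SLI-3)
+ *	phba->lpfc_sli_handle_slow_ring_event =
+ *			lpfc_sli_handle_slow_ring_event_s4;	(on SLI-4)
+ */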
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function
+ * removes each response iocb in the response ring and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_pgp *pgp;
+	IOCB_t *entry;
+	IOCB_t *irsp = NULL;
+	struct lpfc_iocbq *rspiocbp = NULL;
+	uint32_t portRspPut, portRspMax;
+	unsigned long iflag;
+	uint32_t status;
+
+	pgp = &phba->port_gp[pring->ringno];
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	pring->stats.iocb_event++;
+
+	/*
+	 * The next available response entry should never exceed the maximum
+	 * entries.  If it does, treat it as an adapter hardware error.
+	 */
+	portRspMax = pring->numRiocb;
+	portRspPut = le32_to_cpu(pgp->rspPutInx);
+	if (portRspPut >= portRspMax) {
+		/*
+		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
+		 * rsp ring <portRspMax>
+		 */
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0303 Ring %d handler: portRspPut %d "
+				"is bigger than rsp ring %d\n",
+				pring->ringno, portRspPut, portRspMax);
+
+		phba->link_state = LPFC_HBA_ERROR;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		phba->work_hs = HS_FFER3;
+		lpfc_handle_eratt(phba);
+
+		return;
+	}
+
+	rmb();
+	while (pring->rspidx != portRspPut) {
+		/*
+		 * Build a completion list and call the appropriate handler.
+		 * The process is to get the next available response iocb, get
+		 * a free iocb from the list, copy the response data into the
+		 * free iocb, insert to the continuation list, and update the
+		 * next response index to slim.  This process makes response
+		 * iocb's in the ring available to DMA as fast as possible but
+		 * pays a penalty for a copy operation.  Since the iocb is
+		 * only 32 bytes, this penalty is considered small relative to
+		 * the PCI reads for register values and a slim write.  When
+		 * the ulpLe field is set, the entire Command has been
+		 * received.
+		 */
+		entry = lpfc_resp_iocb(phba, pring);
+
+		phba->last_completion_time = jiffies;
+		rspiocbp = __lpfc_sli_get_iocbq(phba);
+		if (rspiocbp == NULL) {
+			printk(KERN_ERR "%s: out of buffers! Failing "
+			       "completion.\n", __func__);
+			break;
+		}
+
+		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
+				      phba->iocb_rsp_size);
+		irsp = &rspiocbp->iocb;
+
+		if (++pring->rspidx >= portRspMax)
+			pring->rspidx = 0;
+
+		if (pring->ringno == LPFC_ELS_RING) {
+			lpfc_debugfs_slow_ring_trc(phba,
+			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
+				*(((uint32_t *) irsp) + 4),
+				*(((uint32_t *) irsp) + 6),
+				*(((uint32_t *) irsp) + 7));
+		}
+
+		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		/* Handle the response IOCB */
+		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+		spin_lock_irqsave(&phba->hbalock, iflag);
+
+		/*
+		 * If the port response put pointer has not been updated, sync
+		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
+		 * response put pointer.
+		 */
+		if (pring->rspidx == portRspPut) {
+			portRspPut = le32_to_cpu(pgp->rspPutInx);
+		}
+	} /* while (pring->rspidx != portRspPut) */
+
+	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
+		/* At least one response entry has been freed */
+		pring->stats.iocb_rsp_full++;
+		/* SET RxRE_RSP in Chip Att register */
+		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
+		writel(status, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+	}
+	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
+		pring->stats.iocb_cmd_empty++;
+
+		/* Force update of the local copy of cmdGetInx */
+		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		lpfc_sli_resume_iocb(phba, pring);
+
+		if ((pring->lpfc_sli_cmd_available))
+			(pring->lpfc_sli_cmd_available) (phba, pring);
+
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function removes each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+				   struct lpfc_sli_ring *pring, uint32_t mask)
+{
+	struct lpfc_iocbq *irspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflag;
+
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
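+	/* Drain the slow-path queue-event list; each entry is either a
+	 * completed ELS WCQE or a received frame to hand to the unsolicited
+	 * buffer handler.
+	 */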
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						 cq_event);
+			/* Translate ELS WCQE to response IOCBQ */
+			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
+								   irspiocbq);
+			if (irspiocbq)
+				lpfc_sli_sp_handle_rspiocb(phba, pring,
+							   irspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+		case CQE_CODE_RECEIVE_V1:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/**
+ * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	if (pring->ringno == LPFC_ELS_RING) {
+		lpfc_fabric_abort_hba(phba);
+	}
+
+	/* Error everything on txq and txcmplq
+	 * First do the txq.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	list_splice_init(&pring->txq, &completions);
+	pring->txq_cnt = 0;
+
+	/* Next issue ABTS for everything on the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * objects in txq and txcmplq. This function will not issue abort iocbs
+ * for the iocb commands in txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+	LIST_HEAD(txq);
+	LIST_HEAD(txcmplq);
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring  *pring;
+
+	/* Currently, only one fcp ring */
+	pring = &psli->ring[psli->fcp_ring];
+
+	spin_lock_irq(&phba->hbalock);
+	/* Retrieve everything on txq */
+	list_splice_init(&pring->txq, &txq);
+	pring->txq_cnt = 0;
+
+	/* Retrieve everything on the txcmplq */
+	list_splice_init(&pring->txcmplq, &txcmplq);
+	pring->txcmplq_cnt = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Flush the txq */
+	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+
+	/* Flush the txcmpq */
+	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+}
+
+/**
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares
+ * with the provided bit mask to check if HBA completed
+ * the restart. This function will wait in a loop for the
+ * HBA to complete restart. If the HBA does not restart within
+ * 15 iterations, the function will reset the HBA again. The
+ * function returns 1 when HBA fail to restart otherwise returns
+ * zero.
+ **/
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int i = 0;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return 1;
+
+	/*
+	 * Check status register every 100ms for 5 retries, then every
+	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
+	 * every 2.5 sec for 4.
+	 * Break out of the loop if errors occurred during init.
+	 */
+	while (((status & mask) != mask) &&
+	       !(status & HS_FFERM) &&
+	       i++ < 20) {
+
+		if (i <= 5)
+			msleep(10);
+		else if (i <= 10)
+			msleep(500);
+		else
+			msleep(2500);
+
+		if (i == 15) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status)) {
+			retval = 1;
+			break;
+		}
+	}
+
+	/* Check to see if any errors occurred during init */
+	if ((status & HS_FFERM) || (i >= 20)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2751 Adapter failed to restart, "
+				"status reg x%x, FW Data: A8 x%x AC x%x\n",
+				status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	}
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to check if the HBA is
+ * ready. This function will wait in a loop for the HBA to become ready.
+ * If the HBA is not ready, the function will reset the HBA PCI
+ * function again. The function returns 1 when the HBA fails to become
+ * ready, otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+	uint32_t status;
+	int retval = 0;
+
+	/* Read the HBA Host Status Register */
+	status = lpfc_sli4_post_status_check(phba);
+
+	if (status) {
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		status = lpfc_sli4_post_status_check(phba);
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status) {
+		phba->link_state = LPFC_HBA_ERROR;
+		retval = 1;
+	} else
+		phba->sli4_hba.intr_enable = 0;
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the hba readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
+ * from the API jump table function pointer from the lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+	return phba->lpfc_sli_brdready(phba, mask);
+}
+
+#define BARRIER_TEST_PATTERN (0xdeadbeef)
+
+/**
+ * lpfc_reset_barrier - Make HBA ready for HBA reset
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called before resetting an HBA. This function is called
+ * with hbalock held and requests HBA to quiesce DMAs before a reset.
+ **/
+void lpfc_reset_barrier(struct lpfc_hba *phba)
+{
+	uint32_t __iomem *resp_buf;
+	uint32_t __iomem *mbox_buf;
+	volatile uint32_t mbox;
+	uint32_t hc_copy, ha_copy, resp_data;
+	int  i;
+	uint8_t hdrtype;
+
+	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
+	if (hdrtype != 0x80 ||
+	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
+	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
+		return;
+
+	/*
+	 * Tell the other part of the chip to suspend temporarily all
+	 * its DMA activity.
+	 */
+	resp_buf = phba->MBslimaddr;
+
+	/* Disable the error attention */
+	if (lpfc_readl(phba->HCregaddr, &hc_copy))
+		return;
+	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	phba->link_flag |= LS_IGNORE_ERATT;
+
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return;
+	if (ha_copy & HA_ERATT) {
+		/* Clear Chip error bit */
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+
+	mbox = 0;
+	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
+	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+
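+	/* Write a recognisable pattern just past the first mailbox word in
+	 * SLIM; the polling loop below treats the adapter overwriting it
+	 * with the one's complement of the pattern as the acknowledgement
+	 * of the KILL_BOARD barrier request.
+	 */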
+	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
+	mbox_buf = phba->MBslimaddr;
+	writel(mbox, mbox_buf);
+
+	for (i = 0; i < 50; i++) {
+		if (lpfc_readl((resp_buf + 1), &resp_data))
+			return;
+		if (resp_data != ~(BARRIER_TEST_PATTERN))
+			mdelay(1);
+		else
+			break;
+	}
+	resp_data = 0;
+	if (lpfc_readl((resp_buf + 1), &resp_data))
+		return;
+	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
+		    phba->pport->stopped)
+			goto restore_hc;
+		else
+			goto clear_errat;
+	}
+
+	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+	resp_data = 0;
+	for (i = 0; i < 500; i++) {
+		if (lpfc_readl(resp_buf, &resp_data))
+			return;
+		if (resp_data != mbox)
+			mdelay(1);
+		else
+			break;
+	}
+
+clear_errat:
+
+	while (++i < 500) {
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return;
+		if (!(ha_copy & HA_ERATT))
+			mdelay(1);
+		else
+			break;
+	}
+
+	if (readl(phba->HAregaddr) & HA_ERATT) {
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+
+restore_hc:
+	phba->link_flag &= ~LS_IGNORE_ERATT;
+	writel(hc_copy, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+}
+
+/**
+ * lpfc_sli_brdkill - Issue a kill_board mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a kill_board mailbox command and waits for
+ * the error attention interrupt. This function is called for stopping
+ * the firmware processing. The caller is not required to hold any
+ * locks. This function calls the lpfc_hba_down_post function to free
+ * any pending commands after the kill. The function will return 1 when it
+ * fails to kill the board, otherwise it returns 0.
+ **/
+int
+lpfc_sli_brdkill(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	LPFC_MBOXQ_t *pmb;
+	uint32_t status;
+	uint32_t ha_copy;
+	int retval;
+	int i = 0;
+
+	psli = &phba->sli;
+
+	/* Kill HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0329 Kill HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb)
+		return 1;
+
+	/* Disable the error attention */
+	spin_lock_irq(&phba->hbalock);
+	if (lpfc_readl(phba->HCregaddr, &status)) {
+		spin_unlock_irq(&phba->hbalock);
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return 1;
+	}
+	status &= ~HC_ERINT_ENA;
+	writel(status, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+	phba->link_flag |= LS_IGNORE_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_kill_board(phba, pmb);
+	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+	if (retval != MBX_SUCCESS) {
+		if (retval != MBX_BUSY)
+			mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2752 KILL_BOARD command failed retval %d\n",
+				retval);
+		spin_lock_irq(&phba->hbalock);
+		phba->link_flag &= ~LS_IGNORE_ERATT;
+		spin_unlock_irq(&phba->hbalock);
+		return 1;
+	}
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
+	 * attention every 100ms for 3 seconds. If we don't get ERATT after
+	 * 3 seconds we still set HBA_ERROR state because the status of the
+	 * board is now undefined.
+	 */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		return 1;
+	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
+		mdelay(100);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return 1;
+	}
+
+	del_timer_sync(&psli->mbox_tmo);
+	if (ha_copy & HA_ERATT) {
+		writel(HA_ERATT, phba->HAregaddr);
+		phba->pport->stopped = 1;
+	}
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	psli->mbox_active = NULL;
+	phba->link_flag &= ~LS_IGNORE_ERATT;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_hba_down_post(phba);
+	phba->link_state = LPFC_HBA_ERROR;
+
+	return ha_copy & HA_ERATT ? 0 : 1;
+}
+
+/**
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets the HBA by writing HC_INITFF to the control
+ * register. After the HBA resets, this function resets all the iocb ring
+ * indices. This function disables PCI layer parity checking during
+ * the reset.
+ * This function returns 0 always.
+ * The caller is not required to hold any locks.
+ **/
+int
+lpfc_sli_brdreset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	uint16_t cfg_value;
+	int i;
+
+	psli = &phba->sli;
+
+	/* Reset HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0325 Reset HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	/* perform board reset */
+	phba->fc_eventTag = 0;
+	phba->link_events = 0;
+	phba->pport->fc_myDID = 0;
+	phba->pport->fc_prevDID = 0;
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND,
+			      (cfg_value &
+			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
+	/* Now toggle INITFF bit in the Host Control Register */
+	writel(HC_INITFF, phba->HCregaddr);
+	mdelay(1);
+	readl(phba->HCregaddr); /* flush */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+	/* Initialize relevant SLI info */
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		pring->flag = 0;
+		pring->rspidx = 0;
+		pring->next_cmdidx  = 0;
+		pring->local_getidx = 0;
+		pring->cmdidx = 0;
+		pring->missbufcnt = 0;
+	}
+
+	phba->link_state = LPFC_WARM_START;
+	return 0;
+}
+
+/**
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets a SLI4 HBA. This function disables PCI layer parity
+ * checking while resetting the device. The caller is not required to hold
+ * any locks.
+ *
+ * This function returns 0 always.
+ **/
+int
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint16_t cfg_value;
+
+	/* Reset HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0295 Reset HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	/* perform board reset */
+	phba->fc_eventTag = 0;
+	phba->link_events = 0;
+	phba->pport->fc_myDID = 0;
+	phba->pport->fc_prevDID = 0;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag &= ~(LPFC_PROCESS_LA);
+	phba->fcf.fcf_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now physically reset the device */
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0389 Performing PCI function reset!\n");
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
+			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+	/* Perform FCoE PCI function reset */
+	lpfc_sli4_queue_destroy(phba);
+	lpfc_pci_function_reset(phba);
+
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
+ * function to free any pending commands. The function enables
+ * POST only during the first initialization. The function returns zero.
+ * The function does not guarantee completion of MBX_RESTART mailbox
+ * command before the return of this function.
+ **/
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
+{
+	MAILBOX_t *mb;
+	struct lpfc_sli *psli;
+	volatile uint32_t word0;
+	void __iomem *to_slim;
+	uint32_t hba_aer_enabled;
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+	psli = &phba->sli;
+
+	/* Restart HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0337 Restart HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	word0 = 0;
+	mb = (MAILBOX_t *) &word0;
+	mb->mbxCommand = MBX_RESTART;
+	mb->mbxHc = 1;
+
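+	/* Quiesce adapter DMA (on the adapters that support the barrier)
+	 * before the MBX_RESTART word is written straight into SLIM below.
+	 */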
+	lpfc_reset_barrier(phba);
+
+	to_slim = phba->MBslimaddr;
+	writel(*(uint32_t *) mb, to_slim);
+	readl(to_slim); /* flush */
+
+	/* Only skip post after fc_ffinit is completed */
+	if (phba->pport->port_state)
+		word0 = 1;	/* This is really setting up word1 */
+	else
+		word0 = 0;	/* This is really setting up word1 */
+	to_slim = phba->MBslimaddr + sizeof (uint32_t);
+	writel(*(uint32_t *) mb, to_slim);
+	readl(to_slim); /* flush */
+
+	lpfc_sli_brdreset(phba);
+	phba->pport->stopped = 0;
+	phba->link_state = LPFC_INIT_START;
+	phba->hba_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
+	/* Give the INITFF and Post time to settle. */
+	mdelay(100);
+
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
+	lpfc_hba_down_post(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t hba_aer_enabled;
+
+	/* Restart HBA */
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0296 Restart HBA Data: x%x x%x\n",
+			phba->pport->port_state, psli->sli_flag);
+
+	/* Take PCIe device Advanced Error Reporting (AER) state */
+	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
+	lpfc_sli4_brdreset(phba);
+
+	spin_lock_irq(&phba->hbalock);
+	phba->pport->stopped = 0;
+	phba->link_state = LPFC_INIT_START;
+	phba->hba_flag = 0;
+	spin_unlock_irq(&phba->hbalock);
+
+	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+	psli->stats_start = get_seconds();
+
+	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
+	if (hba_aer_enabled)
+		pci_disable_pcie_error_reporting(phba->pcidev);
+
+	lpfc_hba_down_post(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
+ * API jump table function pointer from the lpfc_hba struct.
+**/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+	return phba->lpfc_sli_brdrestart(phba);
+}
+
+/**
+ * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called after a HBA restart to wait for successful
+ * restart of the HBA. Successful restart of the HBA is indicated by
+ * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
+ * iterations, the function will restart the HBA again. The function returns
+ * zero if the HBA successfully restarted, else a negative error code.
+ **/
+static int
+lpfc_sli_chipset_init(struct lpfc_hba *phba)
+{
+	uint32_t status, i = 0;
+
+	/* Read the HBA Host Status Register */
+	if (lpfc_readl(phba->HSregaddr, &status))
+		return -EIO;
+
+	/* Check status register to see what current state is */
+	i = 0;
+	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
+
+		/* Check every 10ms for 10 retries, then every 100ms for 90
+		 * retries, then every 1 sec for 50 retries, for a total of
+		 * ~60 seconds before resetting the board and then checking
+		 * every 1 sec for another 50 retries. Waiting up to 60
+		 * seconds before the board is ready is required for Falcon
+		 * FIPS zeroization to complete; any board reset in between
+		 * restarts the zeroization and further delays board
+		 * readiness.
+		 */
+		if (i++ >= 200) {
+			/* Adapter failed to init, timeout, status reg
+			   <status> */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0436 Adapter failed to init, "
+					"timeout, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -ETIMEDOUT;
+		}
+
+		/* Check to see if any errors occurred during init */
+		if (status & HS_FFERM) {
+			/* ERROR: During chipset initialization */
+			/* Adapter failed to init, chipset, status reg
+			   <status> */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0437 Adapter failed to init, "
+					"chipset, status reg x%x, "
+					"FW Data: A8 x%x AC x%x\n", status,
+					readl(phba->MBslimaddr + 0xa8),
+					readl(phba->MBslimaddr + 0xac));
+			phba->link_state = LPFC_HBA_ERROR;
+			return -EIO;
+		}
+
+		if (i <= 10)
+			msleep(10);
+		else if (i <= 100)
+			msleep(100);
+		else
+			msleep(1000);
+
+		if (i == 150) {
+			/* Do post */
+			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+			lpfc_sli_brdrestart(phba);
+		}
+		/* Read the HBA Host Status Register */
+		if (lpfc_readl(phba->HSregaddr, &status))
+			return -EIO;
+	}
+
+	/* Check to see if any errors occurred during init */
+	if (status & HS_FFERM) {
+		/* ERROR: During chipset initialization */
+		/* Adapter failed to init, chipset, status reg <status> */
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0438 Adapter failed to init, chipset, "
+				"status reg x%x, "
+				"FW Data: A8 x%x AC x%x\n", status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
+		phba->link_state = LPFC_HBA_ERROR;
+		return -EIO;
+	}
+
+	/* Clear all interrupt enable conditions */
+	writel(0, phba->HCregaddr);
+	readl(phba->HCregaddr); /* flush */
+
+	/* setup host attn register */
+	writel(0xffffffff, phba->HAregaddr);
+	readl(phba->HAregaddr); /* flush */
+	return 0;
+}
+
+/**
+ * lpfc_sli_hbq_count - Get the number of HBQs to be configured
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
+int
+lpfc_sli_hbq_count(void)
+{
+	return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+/**
+ * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+	int  hbq_count = lpfc_sli_hbq_count();
+	int  count = 0;
+	int  i;
+
+	for (i = 0; i < hbq_count; ++i)
+		count += lpfc_hbq_defs[i]->entry_count;
+	return count;
+}
+
+/**
+ * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
+ *
+ * This function calculates amount of memory required for all hbq entries
+ * to be configured and returns the total memory required.
+ **/
+int
+lpfc_sli_hbq_size(void)
+{
+	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
+}
+
+/**
+ * lpfc_sli_hbq_setup - configure and initialize HBQs
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli_hbq_setup(struct lpfc_hba *phba)
+{
+	int  hbq_count = lpfc_sli_hbq_count();
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *pmbox;
+	uint32_t hbqno;
+	uint32_t hbq_entry_index;
+
+	/* Get a Mailbox buffer to setup mailbox
+	 * commands for HBA initialization
+	 */
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+	if (!pmb)
+		return -ENOMEM;
+
+	pmbox = &pmb->u.mb;
+
+	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
+	phba->link_state = LPFC_INIT_MBX_CMDS;
+	phba->hbq_in_use = 1;
+
+	hbq_entry_index = 0;
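+	/* Each HBQ is given a contiguous slice of the entry space;
+	 * hbq_entry_index tracks where the next HBQ's slice begins.
+	 */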
+	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+		phba->hbqs[hbqno].next_hbqPutIdx = 0;
+		phba->hbqs[hbqno].hbqPutIdx      = 0;
+		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
+		phba->hbqs[hbqno].entry_count =
+			lpfc_hbq_defs[hbqno]->entry_count;
+		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
+			hbq_entry_index, pmb);
+		hbq_entry_index += phba->hbqs[hbqno].entry_count;
+
+		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+			   mbxStatus <status>, ring <num> */
+
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_SLI | LOG_VPORT,
+					"1805 Adapter failed to init. "
+					"Data: x%x x%x x%x\n",
+					pmbox->mbxCommand,
+					pmbox->mbxStatus, hbqno);
+
+			phba->link_state = LPFC_HBA_ERROR;
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -ENXIO;
+		}
+	}
+	phba->hbq_count = hbq_count;
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	/* Initially populate or replenish the HBQs */
+	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
+		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful
+ * else it will return negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+	phba->hbq_in_use = 1;
+	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+	phba->hbq_count = 1;
+	/* Initially populate or replenish the HBQs */
+	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+	return 0;
+}
+
+/**
+ * lpfc_sli_config_port - Issue config port mailbox command
+ * @phba: Pointer to HBA context object.
+ * @sli_mode: sli mode - 2/3
+ *
+ * This function is called by the SLI initialization code path
+ * to issue the config_port mailbox command. This function restarts the
+ * HBA firmware and issues a config_port mailbox command to configure
+ * the SLI interface in the SLI mode specified by the sli_mode
+ * argument. The caller is not required to hold any locks.
+ * The function returns 0 if successful, else returns negative error
+ * code.
+ **/
+int
+lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
+{
+	LPFC_MBOXQ_t *pmb;
+	uint32_t resetcount = 0, rc = 0, done = 0;
+
+	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
+	phba->sli_rev = sli_mode;
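+	/* At most two restart/CONFIG_PORT attempts are made before the
+	 * setup is abandoned.
+	 */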
+	while (resetcount < 2 && !done) {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		lpfc_sli_brdrestart(phba);
+		rc = lpfc_sli_chipset_init(phba);
+		if (rc)
+			break;
+
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+		resetcount++;
+
+		/* Call pre CONFIG_PORT mailbox command initialization.  A
+		 * value of 0 means the call was successful.  Any other
+		 * nonzero value is a failure, but if ERESTART is returned,
+		 * the driver may reset the HBA and try again.
+		 */
+		rc = lpfc_config_port_prep(phba);
+		if (rc == -ERESTART) {
+			phba->link_state = LPFC_LINK_UNKNOWN;
+			continue;
+		} else if (rc)
+			break;
+
+		phba->link_state = LPFC_INIT_MBX_CMDS;
+		lpfc_config_port(phba, pmb);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
+					LPFC_SLI3_HBQ_ENABLED |
+					LPFC_SLI3_CRP_ENABLED |
+					LPFC_SLI3_BG_ENABLED |
+					LPFC_SLI3_DSS_ENABLED);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0442 Adapter failed to init, mbxCmd x%x "
+				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
+				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+			spin_unlock_irq(&phba->hbalock);
+			rc = -ENXIO;
+		} else {
+			/* Allow asynchronous mailbox command to go through */
+			spin_lock_irq(&phba->hbalock);
+			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+			spin_unlock_irq(&phba->hbalock);
+			done = 1;
+
+			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
+				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+					"3110 Port did not grant ASABT\n");
+		}
+	}
+	if (!done) {
+		rc = -EINVAL;
+		goto do_prep_failed;
+	}
+	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+		if (!pmb->u.mb.un.varCfgPort.cMA) {
+			rc = -ENXIO;
+			goto do_prep_failed;
+		}
+		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
+			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+				phba->max_vpi : phba->max_vports;
+
+		} else
+			phba->max_vpi = 0;
+		phba->fips_level = 0;
+		phba->fips_spec_rev = 0;
+		if (pmb->u.mb.un.varCfgPort.gdss) {
+			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
+			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2850 Security Crypto Active. FIPS x%d "
+					"(Spec Rev: x%d)",
+					phba->fips_level, phba->fips_spec_rev);
+		}
+		if (pmb->u.mb.un.varCfgPort.sec_err) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2856 Config Port Security Crypto "
+					"Error: x%x ",
+					pmb->u.mb.un.varCfgPort.sec_err);
+		}
+		if (pmb->u.mb.un.varCfgPort.gerbm)
+			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+		if (pmb->u.mb.un.varCfgPort.gcrp)
+			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
+
+		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+		phba->port_gp = phba->mbox->us.s3_pgp.port;
+
+		if (phba->cfg_enable_bg) {
+			if (pmb->u.mb.un.varCfgPort.gbg)
+				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+			else
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"0443 Adapter did not grant "
+						"BlockGuard\n");
+		}
+	} else {
+		phba->hbq_get = NULL;
+		phba->port_gp = phba->mbox->us.s2.port;
+		phba->max_vpi = 0;
+	}
+do_prep_failed:
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return rc;
+}
+
+
+/**
+ * lpfc_sli_hba_setup - SLI initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI initialization function. This function
+ * is called by the HBA initialization code, HBA reset code and HBA
+ * error attention handler code. Caller is not required to hold any
+ * locks. This function issues config_port mailbox command to configure
+ * the SLI, setup iocb rings and HBQ rings. In the end the function
+ * calls the config_port_post function to issue init_link mailbox
+ * command and to start the discovery. The function will return zero
+ * if successful, else it will return negative error code.
+ **/
+int
+lpfc_sli_hba_setup(struct lpfc_hba *phba)
+{
+	uint32_t rc;
+	int  mode = 3, i;
+	int longs;
+
+	switch (lpfc_sli_mode) {
+	case 2:
+		if (phba->cfg_enable_npiv) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1824 NPIV enabled: Override lpfc_sli_mode "
+				"parameter (%d) to auto (0).\n",
+				lpfc_sli_mode);
+			break;
+		}
+		mode = 2;
+		break;
+	case 0:
+	case 3:
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1819 Unrecognized lpfc_sli_mode "
+				"parameter: %d.\n", lpfc_sli_mode);
+
+		break;
+	}
+
+	rc = lpfc_sli_config_port(phba, mode);
+
+	if (rc && lpfc_sli_mode == 3)
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+				"1820 Unable to select SLI-3.  "
+				"Not supported by adapter.\n");
+	if (rc && mode != 2)
+		rc = lpfc_sli_config_port(phba, 2);
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2709 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2708 This device does not support "
+					"Advanced Error Reporting (AER)\n");
+			phba->cfg_aer_support = 0;
+		}
+	}
+
+	if (phba->sli_rev == 3) {
+		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
+		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
+	} else {
+		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
+		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
+		phba->sli3_options = 0;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
+			phba->sli_rev, phba->max_vpi);
+	rc = lpfc_sli_ring_map(phba);
+
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	/* Initialize VPIs. */
+	if (phba->sli_rev == LPFC_SLI_REV3) {
+		/*
+		 * The VPI bitmask and physical ID array are allocated
+		 * and initialized once only - at driver load.  A port
+		 * reset doesn't need to reinitialize this memory.
+		 */
+		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
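+			/* The bitmask must cover VPIs 0..max_vpi, rounded up to whole longs. */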
+			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
+			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
+						  GFP_KERNEL);
+			if (!phba->vpi_bmask) {
+				rc = -ENOMEM;
+				goto lpfc_sli_hba_setup_error;
+			}
+
+			phba->vpi_ids = kzalloc(
+					(phba->max_vpi+1) * sizeof(uint16_t),
+					GFP_KERNEL);
+			if (!phba->vpi_ids) {
+				kfree(phba->vpi_bmask);
+				rc = -ENOMEM;
+				goto lpfc_sli_hba_setup_error;
+			}
+			for (i = 0; i < phba->max_vpi; i++)
+				phba->vpi_ids[i] = i;
+		}
+	}
+
+	/* Init HBQs */
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+		rc = lpfc_sli_hbq_setup(phba);
+		if (rc)
+			goto lpfc_sli_hba_setup_error;
+	}
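+	/* Allow link attention (LA) events to be processed. */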
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag |= LPFC_PROCESS_LA;
+	spin_unlock_irq(&phba->hbalock);
+
+	rc = lpfc_config_port_post(phba);
+	if (rc)
+		goto lpfc_sli_hba_setup_error;
+
+	return rc;
+
+lpfc_sli_hba_setup_error:
+	phba->link_state = LPFC_HBA_ERROR;
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0445 Firmware initialization failed\n");
+	return rc;
+}
+
+/**
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a dump mailbox command to read config region
+ * 23, parses the records in the region, and populates the driver
+ * data structures.
+ **/
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_mqe *mqe;
+	uint32_t data_length;
+	int rc;
+
+	/* Program the default value of vlan_id and fc_map */
+	phba->valid_vlan = 0;
+	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	mqe = &mboxq->u.mqe;
+	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+		rc = -ENOMEM;
+		goto out_free_mboxq;
+	}
+
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):2571 Mailbox cmd x%x Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+			"CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_command, mqe),
+			bf_get(lpfc_mqe_status, mqe),
+			mqe->un.mb_words[0], mqe->un.mb_words[1],
+			mqe->un.mb_words[2], mqe->un.mb_words[3],
+			mqe->un.mb_words[4], mqe->un.mb_words[5],
+			mqe->un.mb_words[6], mqe->un.mb_words[7],
+			mqe->un.mb_words[8], mqe->un.mb_words[9],
+			mqe->un.mb_words[10], mqe->un.mb_words[11],
+			mqe->un.mb_words[12], mqe->un.mb_words[13],
+			mqe->un.mb_words[14], mqe->un.mb_words[15],
+			mqe->un.mb_words[16], mqe->un.mb_words[50],
+			mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+
+	if (rc) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		rc = -EIO;
+		goto out_free_mboxq;
+	}
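+	/* Mailbox word 5 carries the returned data length; bound it by DMP_RGN23_SIZE. */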
+	data_length = mqe->un.mb_words[5];
+	if (data_length > DMP_RGN23_SIZE) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		rc = -EIO;
+		goto out_free_mboxq;
+	}
+
+	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	rc = 0;
+
+out_free_mboxq:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ *	      On output, the number of data bytes in @vpd.
+ *
+ * This routine executes a READ_REV SLI4 mailbox command.  In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		    uint8_t *vpd, uint32_t *vpd_size)
+{
+	int rc = 0;
+	uint32_t dma_size;
+	struct lpfc_dmabuf *dmabuf;
+	struct lpfc_mqe *mqe;
+
+	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	/*
+	 * Get a DMA buffer for the vpd data resulting from the READ_REV
+	 * mailbox command.
+	 */
+	dma_size = *vpd_size;
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  dma_size,
+					  &dmabuf->phys,
+					  GFP_KERNEL);
+	if (!dmabuf->virt) {
+		kfree(dmabuf);
+		return -ENOMEM;
+	}
+	memset(dmabuf->virt, 0, dma_size);
+
+	/*
+	 * The SLI4 implementation of READ_REV conflicts at word1,
+	 * bits 31:16 and SLI4 adds vpd functionality not present
+	 * in SLI3.  This code corrects the conflicts.
+	 */
+	lpfc_read_rev(phba, mboxq);
+	mqe = &mboxq->u.mqe;
+	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+	mqe->un.read_rev.word1 &= 0x0000FFFF;
+	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc) {
+		dma_free_coherent(&phba->pcidev->dev, dma_size,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+		return -EIO;
+	}
+
+	/*
+	 * The available vpd length cannot be bigger than the
+	 * DMA buffer passed to the port.  Catch the less than
+	 * case and update the caller's size.
+	 */
+	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+		*vpd_size = mqe->un.read_rev.avail_vpd_len;
+
+	memcpy(vpd, dmabuf->virt, *vpd_size);
+
+	dma_free_coherent(&phba->pcidev->dev, dma_size,
+			  dmabuf->virt, dmabuf->phys);
+	kfree(dmabuf);
+	return 0;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves the SLI4 device physical port name that this PCI
+ * function is attached to.
+ *
+ * Return codes
+ *      0 - successful
+ *      otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
+	struct lpfc_controller_attribute *cntl_attr;
+	struct lpfc_mbx_get_port_name *get_port_name;
+	void *virtaddr = NULL;
+	uint32_t alloclen, reqlen;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	char cport_name = 0;
+	int rc;
+
+	/* We assume nothing at this point */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	/* obtain link type and link number via READ_CONFIG */
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+	lpfc_sli4_read_config(phba);
+	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+		goto retrieve_ppname;
+
+	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
+	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
+			LPFC_SLI4_MBX_NEMBED);
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"3084 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory size "
+				"(%d)\n", alloclen, reqlen);
+		rc = -ENOMEM;
+		goto out_free_mboxq;
+	}
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	virtaddr = mboxq->sge_array->addr[0];
+	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
+	shdr = &mbx_cntl_attr->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3085 Mailbox x%x (x%x/x%x) failed, "
+				"rc:x%x, status:x%x, add_status:x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				rc, shdr_status, shdr_add_status);
+		rc = -ENXIO;
+		goto out_free_mboxq;
+	}
+	cntl_attr = &mbx_cntl_attr->cntl_attr;
+	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+	phba->sli4_hba.lnk_info.lnk_tp =
+		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
+	phba->sli4_hba.lnk_info.lnk_no =
+		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3086 lnk_type:%d, lnk_numb:%d\n",
+			phba->sli4_hba.lnk_info.lnk_tp,
+			phba->sli4_hba.lnk_info.lnk_no);
+
+retrieve_ppname:
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+		LPFC_MBOX_OPCODE_GET_PORT_NAME,
+		sizeof(struct lpfc_mbx_get_port_name) -
+		sizeof(struct lpfc_sli4_cfg_mhdr),
+		LPFC_SLI4_MBX_EMBED);
+	get_port_name = &mboxq->u.mqe.un.get_port_name;
+	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
+	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
+		phba->sli4_hba.lnk_info.lnk_tp);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3087 Mailbox x%x (x%x/x%x) failed: "
+				"rc:x%x, status:x%x, add_status:x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				rc, shdr_status, shdr_add_status);
+		rc = -ENXIO;
+		goto out_free_mboxq;
+	}
+	switch (phba->sli4_hba.lnk_info.lnk_no) {
+	case LPFC_LINK_NUMBER_0:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_1:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_2:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	case LPFC_LINK_NUMBER_3:
+		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
+				&get_port_name->u.response);
+		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
+		break;
+	default:
+		break;
+	}
+
+	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
+		phba->Port[0] = cport_name;
+		phba->Port[1] = '\0';
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3091 SLI get port name: %s\n", phba->Port);
+	}
+
+out_free_mboxq:
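+	/* A timed-out mailbox may still be owned by the port, so do not free it. */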
+	if (rc != MBX_TIMEOUT) {
+		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		else
+			mempool_free(mboxq, phba->mbox_mem_pool);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues.
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+	uint8_t fcp_eqidx;
+
+	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
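+	/* Rearm every FCP completion queue before rearming the event queues. */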
+	fcp_eqidx = 0;
+	if (phba->sli4_hba.fcp_cq) {
+		do
+			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+					     LPFC_QUEUE_REARM);
+		while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+	}
+	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
+	if (phba->sli4_hba.fp_eq) {
+		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+		     fcp_eqidx++)
+			lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+					     LPFC_QUEUE_REARM);
+	}
+}
+
+/**
+ * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
+ *
+ * This function calls the port and retrieves the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful.  Nonzero otherwise.
+ **/
+int
+lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
+			       uint16_t *extnt_count, uint16_t *extnt_size)
+{
+	int rc = 0;
+	uint32_t length;
+	uint32_t mbox_tmo;
+	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
+	LPFC_MBOXQ_t *mbox;
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/* Find out how many extents are available for this resource type */
+	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	/* Send an extents count of 0 - the GET doesn't use it. */
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+					LPFC_SLI4_MBX_EMBED);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
+	if (bf_get(lpfc_mbox_hdr_status,
+		   &rsrc_info->header.cfg_shdr.response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+				"2930 Failed to get resource extents "
+				"Status 0x%x Add'l Status 0x%x\n",
+				bf_get(lpfc_mbox_hdr_status,
+				       &rsrc_info->header.cfg_shdr.response),
+				bf_get(lpfc_mbox_hdr_add_status,
+				       &rsrc_info->header.cfg_shdr.response));
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
+			      &rsrc_info->u.rsp);
+	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
+			     &rsrc_info->u.rsp);
+ err_exit:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The extent type to check.
+ *
+ * This function reads the current available extents from the port and checks
+ * if the extent count or extent size has changed since the last access.
+ * Callers use this routine after a port reset to determine whether an
+ * extent reprovisioning is required.
+ *
+ * Returns:
+ *   -Error: error indicates problem.
+ *   1: Extent count or size has changed.
+ *   0: No changes.
+ **/
+static int
+lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
+{
+	uint16_t curr_ext_cnt, rsrc_ext_cnt;
+	uint16_t size_diff, rsrc_ext_size;
+	int rc = 0;
+	struct lpfc_rsrc_blks *rsrc_entry;
+	struct list_head *rsrc_blk_list = NULL;
+
+	size_diff = 0;
+	curr_ext_cnt = 0;
+	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+					    &rsrc_ext_cnt,
+					    &rsrc_ext_size);
+	if (unlikely(rc))
+		return -EIO;
+
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	default:
+		break;
+	}
+
+	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
+		curr_ext_cnt++;
+		if (rsrc_entry->rsrc_size != rsrc_ext_size)
+			size_diff++;
+	}
+
+	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
+		rc = 1;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_cfg_post_extnts - Post an SLI4 resource extent allocation request
+ * @phba: Pointer to HBA context object.
+ * @extnt_cnt: number of available extents.
+ * @type: the extent type (rpi, xri, vfi, vpi).
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
+ * @mbox: pointer to the caller's allocated mailbox structure.
+ *
+ * This function executes the extents allocation request.  It also
+ * takes care of the amount of memory needed to allocate or get the
+ * allocated extents. It is the caller's responsibility to evaluate
+ * the response.
+ *
+ * Returns:
+ *   -Error:  Error value describes the condition found.
+ *   0: if successful
+ **/
+static int
+lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
+			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
+{
+	int rc = 0;
+	uint32_t req_len;
+	uint32_t emb_len;
+	uint32_t alloc_len, mbox_tmo;
+
+	/* Calculate the total requested length of the dma memory */
+	req_len = *extnt_cnt * sizeof(uint16_t);
+
+	/*
+	 * Calculate the size of an embedded mailbox.  The uint32_t
+	 * accounts for the extents-specific word.
+	 */
+	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+		sizeof(uint32_t);
+
+	/*
+	 * Presume the allocation and response will fit into an embedded
+	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
+	 */
+	*emb = LPFC_SLI4_MBX_EMBED;
+	if (req_len > emb_len) {
+		req_len = *extnt_cnt * sizeof(uint16_t) +
+			sizeof(union lpfc_sli4_cfg_shdr) +
+			sizeof(uint32_t);
+		*emb = LPFC_SLI4_MBX_NEMBED;
+	}
+
+	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
+				     req_len, *emb);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2982 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		return -ENOMEM;
+	}
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
+	if (unlikely(rc))
+		return -EIO;
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+
+	if (unlikely(rc))
+		rc = -EIO;
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type:  The resource extent type to allocate.
+ *
+ * This function allocates the number of elements for the specified
+ * resource type.
+ **/
+static int
+lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+	bool emb = false;
+	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
+	uint16_t rsrc_id, rsrc_start, j, k;
+	uint16_t *ids;
+	int i, rc;
+	unsigned long longs;
+	unsigned long *bmask;
+	struct lpfc_rsrc_blks *rsrc_blks;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t length;
+	struct lpfc_id_range *id_array = NULL;
+	void *virtaddr = NULL;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+	struct list_head *ext_blk_list;
+
+	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
+					    &rsrc_cnt,
+					    &rsrc_size);
+	if (unlikely(rc))
+		return -EIO;
+
+	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+			"3009 No available Resource Extents "
+			"for resource type 0x%x: Count: 0x%x, "
+			"Size 0x%x\n", type, rsrc_cnt,
+			rsrc_size);
+		return -ENOMEM;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
+			"2903 Available Resource Extents "
+			"for resource type 0x%x: Count: 0x%x, "
+			"Size 0x%x\n", type, rsrc_cnt,
+			rsrc_size);
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	/*
+	 * Figure out where the response is located.  Then get local pointers
+	 * to the response data.  The port is not guaranteed to honor the full
+	 * extent count requested, so update the local variable with the count
+	 * actually allocated by the port.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED) {
+		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+		id_array = &rsrc_ext->u.rsp.id[0];
+		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+	} else {
+		virtaddr = mbox->sge_array->addr[0];
+		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+		id_array = &n_rsrc->id;
+	}
+
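+	/* Round up to whole longs for the id bitmask; total ids = count * size. */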
+	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+	rsrc_id_cnt = rsrc_cnt * rsrc_size;
+
+	/*
+	 * Based on the resource size and count, correct the base and max
+	 * resource values.
+	 */
+	length = sizeof(struct lpfc_rsrc_blks);
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		phba->sli4_hba.rpi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_ids)) {
+			kfree(phba->sli4_hba.rpi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/*
+		 * The next_rpi was initialized with the maximum available
+		 * count but the port may allocate a smaller number.  Catch
+		 * that case and update the next_rpi.
+		 */
+		phba->sli4_hba.next_rpi = rsrc_id_cnt;
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.rpi_bmask;
+		ids = phba->sli4_hba.rpi_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		phba->vpi_bmask = kzalloc(longs *
+					  sizeof(unsigned long),
+					  GFP_KERNEL);
+		if (unlikely(!phba->vpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->vpi_ids = kzalloc(rsrc_id_cnt *
+					 sizeof(uint16_t),
+					 GFP_KERNEL);
+		if (unlikely(!phba->vpi_ids)) {
+			kfree(phba->vpi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->vpi_bmask;
+		ids = phba->vpi_ids;
+		ext_blk_list = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		phba->sli4_hba.xri_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_ids)) {
+			kfree(phba->sli4_hba.xri_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.xri_bmask;
+		ids = phba->sli4_hba.xri_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		phba->sli4_hba.vfi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_ids)) {
+			kfree(phba->sli4_hba.vfi_bmask);
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+
+		/* Initialize local ptrs for common extent processing later. */
+		bmask = phba->sli4_hba.vfi_bmask;
+		ids = phba->sli4_hba.vfi_ids;
+		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	default:
+		/* Unsupported Opcode.  Fail call. */
+		id_array = NULL;
+		bmask = NULL;
+		ids = NULL;
+		ext_blk_list = NULL;
+		goto err_exit;
+	}
+
+	/*
+	 * Complete initializing the extent configuration with the
+	 * allocated ids assigned to this function.  The bitmask serves
+	 * as an index into the array and manages the available ids.  The
+	 * array just stores the ids communicated to the port via the wqes.
+	 */
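+	/* Each 32-bit word of id_array packs two 16-bit extent base ids. */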
+	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
+		if ((i % 2) == 0)
+			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
+					 &id_array[k]);
+		else
+			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
+					 &id_array[k]);
+
+		rsrc_blks = kzalloc(length, GFP_KERNEL);
+		if (unlikely(!rsrc_blks)) {
+			rc = -ENOMEM;
+			kfree(bmask);
+			kfree(ids);
+			goto err_exit;
+		}
+		rsrc_blks->rsrc_start = rsrc_id;
+		rsrc_blks->rsrc_size = rsrc_size;
+		list_add_tail(&rsrc_blks->list, ext_blk_list);
+		rsrc_start = rsrc_id;
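+		/* The first XRI extent reserves its leading XRIs for ELS IOCBs. */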
+		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+			phba->sli4_hba.scsi_xri_start = rsrc_start +
+				lpfc_sli4_get_els_iocb_cnt(phba);
+
+		while (rsrc_id < (rsrc_start + rsrc_size)) {
+			ids[j] = rsrc_id;
+			rsrc_id++;
+			j++;
+		}
+		/* Entire word processed.  Get next word.*/
+		if ((i % 2) == 1)
+			k++;
+	}
+ err_exit:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
+ * @phba: Pointer to HBA context object.
+ * @type: the extent's type.
+ *
+ * This function deallocates all extents of a particular resource type.
+ * SLI4 does not allow for deallocating a particular extent range.  It
+ * is the caller's responsibility to release all kernel memory resources.
+ **/
+static int
+lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
+{
+	int rc;
+	uint32_t length, mbox_tmo = 0;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
+	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/*
+	 * This function sends an embedded mailbox because it only sends the
+	 * resource type.  All extents of this type are released by the
+	 * port.
+	 */
+	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	/* Send an extents count of 0 - the dealloc doesn't use it. */
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
+					LPFC_SLI4_MBX_EMBED);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
+	if (bf_get(lpfc_mbox_hdr_status,
+		   &dealloc_rsrc->header.cfg_shdr.response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+				"2919 Failed to release resource extents "
+				"for type %d - Status 0x%x Add'l Status 0x%x. "
+				"Resource memory not released.\n",
+				type,
+				bf_get(lpfc_mbox_hdr_status,
+				    &dealloc_rsrc->header.cfg_shdr.response),
+				bf_get(lpfc_mbox_hdr_add_status,
+				    &dealloc_rsrc->header.cfg_shdr.response));
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	/* Release kernel memory resources for the specific type. */
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		kfree(phba->vpi_bmask);
+		kfree(phba->vpi_ids);
+		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->lpfc_vpi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		kfree(phba->sli4_hba.xri_bmask);
+		kfree(phba->sli4_hba.xri_ids);
+		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		kfree(phba->sli4_hba.vfi_bmask);
+		kfree(phba->sli4_hba.vfi_ids);
+		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		/* RPI bitmask and physical id array are cleaned up earlier. */
+		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
+				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
+			list_del_init(&rsrc_blk->list);
+			kfree(rsrc_blk);
+		}
+		break;
+	default:
+		break;
+	}
+
+	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+
+ out_free_mbox:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function allocates all SLI4 resource identifiers.
+ **/
+int
+lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+{
+	int i, rc, error = 0;
+	uint16_t count, base;
+	unsigned long longs;
+
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	if (phba->sli4_hba.extents_in_use) {
+		/*
+		 * The port supports resource extents. The XRI, VPI, VFI, RPI
+		 * resource extent count must be read and allocated before
+		 * provisioning the resource id arrays.
+		 */
+		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+		    LPFC_IDX_RSRC_RDY) {
+			/*
+			 * Extent-based resources are set - the driver could
+			 * be in a port reset. Figure out if any corrective
+			 * actions need to be taken.
+			 */
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_VFI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_VPI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_XRI);
+			if (rc != 0)
+				error++;
+			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
+						 LPFC_RSC_TYPE_FCOE_RPI);
+			if (rc != 0)
+				error++;
+
+			/*
+			 * It's possible that the number of resources
+			 * provided to this port instance changed between
+			 * resets.  Detect this condition and reallocate
+			 * resources.  Otherwise, there is no action.
+			 */
+			if (error) {
+				lpfc_printf_log(phba, KERN_INFO,
+						LOG_MBOX | LOG_INIT,
+						"2931 Detected extent resource "
+						"change.  Reallocating all "
+						"extents.\n");
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_VFI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_VPI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_XRI);
+				rc = lpfc_sli4_dealloc_extent(phba,
+						 LPFC_RSC_TYPE_FCOE_RPI);
+			} else
+				return 0;
+		}
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+		if (unlikely(rc))
+			goto err_exit;
+
+		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+		if (unlikely(rc))
+			goto err_exit;
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_IDX_RSRC_RDY);
+		return rc;
+	} else {
+		/*
+		 * The port does not support resource extents.  The XRI, VPI,
+		 * VFI, RPI resource ids were determined from READ_CONFIG.
+		 * Just allocate the bitmasks and provision the resource id
+		 * arrays.  If a port reset is active, the resources don't
+		 * need any action - just exit.
+		 */
+		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
+		    LPFC_IDX_RSRC_RDY) {
+			lpfc_sli4_dealloc_resource_identifiers(phba);
+			lpfc_sli4_remove_rpis(phba);
+		}
+		/* RPIs. */
+		count = phba->sli4_hba.max_cfg_param.max_rpi;
+		base = phba->sli4_hba.max_cfg_param.rpi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.rpi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
+			rc = -ENOMEM;
+			goto err_exit;
+		}
+		phba->sli4_hba.rpi_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.rpi_ids)) {
+			rc = -ENOMEM;
+			goto free_rpi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.rpi_ids[i] = base + i;
+
+		/* VPIs. */
+		count = phba->sli4_hba.max_cfg_param.max_vpi;
+		base = phba->sli4_hba.max_cfg_param.vpi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->vpi_bmask = kzalloc(longs *
+					  sizeof(unsigned long),
+					  GFP_KERNEL);
+		if (unlikely(!phba->vpi_bmask)) {
+			rc = -ENOMEM;
+			goto free_rpi_ids;
+		}
+		phba->vpi_ids = kzalloc(count *
+					sizeof(uint16_t),
+					GFP_KERNEL);
+		if (unlikely(!phba->vpi_ids)) {
+			rc = -ENOMEM;
+			goto free_vpi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->vpi_ids[i] = base + i;
+
+		/* XRIs. */
+		count = phba->sli4_hba.max_cfg_param.max_xri;
+		base = phba->sli4_hba.max_cfg_param.xri_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.xri_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_bmask)) {
+			rc = -ENOMEM;
+			goto free_vpi_ids;
+		}
+		phba->sli4_hba.max_cfg_param.xri_used = 0;
+		phba->sli4_hba.xri_count = 0;
+		phba->sli4_hba.xri_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.xri_ids)) {
+			rc = -ENOMEM;
+			goto free_xri_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.xri_ids[i] = base + i;
+
+		/* VFIs. */
+		count = phba->sli4_hba.max_cfg_param.max_vfi;
+		base = phba->sli4_hba.max_cfg_param.vfi_base;
+		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
+		phba->sli4_hba.vfi_bmask = kzalloc(longs *
+						   sizeof(unsigned long),
+						   GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
+			rc = -ENOMEM;
+			goto free_xri_ids;
+		}
+		phba->sli4_hba.vfi_ids = kzalloc(count *
+						 sizeof(uint16_t),
+						 GFP_KERNEL);
+		if (unlikely(!phba->sli4_hba.vfi_ids)) {
+			rc = -ENOMEM;
+			goto free_vfi_bmask;
+		}
+
+		for (i = 0; i < count; i++)
+			phba->sli4_hba.vfi_ids[i] = base + i;
+
+		/*
+		 * Mark all resources ready.  An HBA reset doesn't need
+		 * to reset the initialization.
+		 */
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_IDX_RSRC_RDY);
+		return 0;
+	}
+
+ free_vfi_bmask:
+	kfree(phba->sli4_hba.vfi_bmask);
+ free_xri_ids:
+	kfree(phba->sli4_hba.xri_ids);
+ free_xri_bmask:
+	kfree(phba->sli4_hba.xri_bmask);
+ free_vpi_ids:
+	kfree(phba->vpi_ids);
+ free_vpi_bmask:
+	kfree(phba->vpi_bmask);
+ free_rpi_ids:
+	kfree(phba->sli4_hba.rpi_ids);
+ free_rpi_bmask:
+	kfree(phba->sli4_hba.rpi_bmask);
+ err_exit:
+	return rc;
+}
+
+/**
+ * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function deallocates all SLI4 resource identifiers previously
+ * allocated by the driver.
+ **/
+int
+lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
+{
+	if (phba->sli4_hba.extents_in_use) {
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
+		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
+	} else {
+		kfree(phba->vpi_bmask);
+		kfree(phba->vpi_ids);
+		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		kfree(phba->sli4_hba.xri_bmask);
+		kfree(phba->sli4_hba.xri_ids);
+		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		kfree(phba->sli4_hba.vfi_bmask);
+		kfree(phba->sli4_hba.vfi_ids);
+		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_cnt: buffer to hold port extent count response
+ * @extnt_size: buffer to hold port extent size response.
+ *
+ * This function calls the port to read the host allocated extents
+ * for a particular type.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+			       uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+	bool emb;
+	int rc = 0;
+	uint16_t curr_blks = 0;
+	uint32_t req_len, emb_len;
+	uint32_t alloc_len, mbox_tmo;
+	struct list_head *blk_list_head;
+	struct lpfc_rsrc_blks *rsrc_blk;
+	LPFC_MBOXQ_t *mbox;
+	void *virtaddr = NULL;
+	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+	union  lpfc_sli4_cfg_shdr *shdr;
+
+	switch (type) {
+	case LPFC_RSC_TYPE_FCOE_VPI:
+		blk_list_head = &phba->lpfc_vpi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_XRI:
+		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_VFI:
+		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+		break;
+	case LPFC_RSC_TYPE_FCOE_RPI:
+		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+		break;
+	default:
+		return -EIO;
+	}
+
+	/* Count the number of extents currently allocated for this type. */
+	list_for_each_entry(rsrc_blk, blk_list_head, list) {
+		if (curr_blks == 0) {
+			/*
+			 * The GET_ALLOCATED mailbox does not return the size,
+			 * just the count.  The size should be just the size
+			 * stored in the current allocated block and all sizes
+			 * for an extent type are the same so set the return
+			 * value now.
+			 */
+			*extnt_size = rsrc_blk->rsrc_size;
+		}
+		curr_blks++;
+	}
+
+	/* Calculate the total requested length of the dma memory. */
+	req_len = curr_blks * sizeof(uint16_t);
+
+	/*
+	 * Calculate the size of an embedded mailbox.  The uint32_t
+ * accounts for the extents-specific word.
+	 */
+	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+		sizeof(uint32_t);
+
+	/*
+	 * Presume the allocation and response will fit into an embedded
+	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
+	 */
+	emb = LPFC_SLI4_MBX_EMBED;
+	req_len = emb_len;
+	if (req_len > emb_len) {
+		req_len = curr_blks * sizeof(uint16_t) +
+			sizeof(union lpfc_sli4_cfg_shdr) +
+			sizeof(uint32_t);
+		emb = LPFC_SLI4_MBX_NEMBED;
+	}
+
+	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+				     req_len, emb);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2983 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		rc = -ENOMEM;
+		goto err_exit;
+	}
+	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto err_exit;
+	}
+
+	/*
+	 * Figure out where the response is located.  Then get local pointers
+	 * to the response data.  The port is not guaranteed to honor the full
+	 * extent count requested, so update the local variable with the count
+	 * actually allocated by the port.
+	 */
+	if (emb == LPFC_SLI4_MBX_EMBED) {
+		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+		shdr = &rsrc_ext->header.cfg_shdr;
+		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+	} else {
+		virtaddr = mbox->sge_array->addr[0];
+		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+		shdr = &n_rsrc->cfg_shdr;
+		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+	}
+
+	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+			"2984 Failed to read allocated resources "
+			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
+			type,
+			bf_get(lpfc_mbox_hdr_status, &shdr->response),
+			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+		rc = -EIO;
+		goto err_exit;
+	}
+ err_exit:
+	lpfc_sli4_mbox_cmd_free(phba, mbox);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+	int rc;
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mqe *mqe;
+	uint8_t *vpd;
+	uint32_t vpd_size;
+	uint32_t ftr_rsp = 0;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+	struct lpfc_vport *vport = phba->pport;
+	struct lpfc_dmabuf *mp;
+
+	/* Perform a PCI function reset to start from clean */
+	rc = lpfc_pci_function_reset(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+
+	/* Check the HBA Host Status Register for readiness */
+	rc = lpfc_sli4_post_status_check(phba);
+	if (unlikely(rc))
+		return -ENODEV;
+	else {
+		spin_lock_irq(&phba->hbalock);
+		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+		spin_unlock_irq(&phba->hbalock);
+	}
+
+	/*
+	 * Allocate a single mailbox container for initializing the
+	 * port.
+	 */
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* Issue READ_REV to collect vpd and FW information. */
+	vpd_size = SLI4_PAGE_SIZE;
+	vpd = kzalloc(vpd_size, GFP_KERNEL);
+	if (!vpd) {
+		rc = -ENOMEM;
+		goto out_free_mbox;
+	}
+
+	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+	if (unlikely(rc)) {
+		kfree(vpd);
+		goto out_free_mbox;
+	}
+	mqe = &mboxq->u.mqe;
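+	/* Derive the SLI level and FCoE/FIP capability from the READ_REV response. */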
+	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
+
+	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
+		LPFC_DCBX_CEE_MODE)
+		phba->hba_flag |= HBA_FIP_SUPPORT;
+	else
+		phba->hba_flag &= ~HBA_FIP_SUPPORT;
+
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0376 READ_REV Error. SLI Level %d "
+			"FCoE enabled %d\n",
+			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
+		rc = -EIO;
+		kfree(vpd);
+		goto out_free_mbox;
+	}
+
+	/*
+	 * Continue initialization with default values even if the driver
+	 * failed to read the FCoE param config regions; only read the
+	 * parameters if the board is FCoE.
+	 */
+	if (phba->hba_flag & HBA_FCOE_MODE &&
+	    lpfc_sli4_read_fcoe_params(phba))
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+			"2570 Failed to read FCoE parameters\n");
+
+	/*
+	 * Retrieve the SLI4 device physical port name; failure to do so
+	 * is considered non-fatal.
+	 */
+	rc = lpfc_sli4_retrieve_pport_name(phba);
+	if (!rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"3080 Successful retrieving SLI4 device "
+				"physical port name: %s.\n", phba->Port);
+
+	/*
+	 * Evaluate the read rev and vpd data. Populate the driver
+	 * state with the results. If this routine fails, the failure
+	 * is not fatal as the driver will use generic values.
+	 */
+	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+	if (unlikely(!rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0377 Error %d parsing vpd. "
+				"Using defaults.\n", rc);
+		rc = 0;
+	}
+	kfree(vpd);
+
+	/* Save information as VPD data */
+	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+					 &mqe->un.read_rev);
+	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+				       &mqe->un.read_rev);
+	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+					    &mqe->un.read_rev);
+	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+					   &mqe->un.read_rev);
+	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0380 READ_REV Status x%x "
+			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0,
+			bf_get(lpfc_mqe_status, mqe),
+			phba->vpd.rev.opFwName,
+			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+
+	/*
+	 * Discover the port's supported feature set and match it against the
+	 * host's requests.
+	 */
+	lpfc_request_features(phba, mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (unlikely(rc)) {
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	/*
+	 * The port must support FCP initiator mode as this is the
+	 * only mode running in the host.
+	 */
+	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0378 No support for fcpi mode.\n");
+		ftr_rsp++;
+	}
+	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+	else
+		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
+	/*
+	 * If the port cannot support the host's requested features
+	 * then turn off the global config parameters to disable the
+	 * feature in the driver.  This is not a fatal error.
+	 */
+	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
+	if (phba->cfg_enable_bg) {
+		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
+			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+		else
+			ftr_rsp++;
+	}
+
+	if (phba->max_vpi && phba->cfg_enable_npiv &&
+	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+		ftr_rsp++;
+
+	if (ftr_rsp) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"0379 Feature Mismatch Data: x%08x %08x "
+				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+				phba->cfg_enable_npiv, phba->max_vpi);
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+			phba->cfg_enable_bg = 0;
+		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+			phba->cfg_enable_npiv = 0;
+	}
+
+	/* These SLI3 features are assumed in SLI4 */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+	spin_unlock_irq(&phba->hbalock);
+
+	/*
+	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
+	 * calls depend on these resources to complete port setup.
+	 */
+	rc = lpfc_sli4_alloc_resource_identifiers(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"2920 Failed to alloc Resource IDs "
+				"rc = x%x\n", rc);
+		goto out_free_mbox;
+	}
+	/* update physical xri mappings in the scsi buffers */
+	lpfc_scsi_buf_update(phba);
+
+	/* Read the port's service parameters. */
+	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+	if (rc) {
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -ENOMEM;
+		goto out_free_mbox;
+	}
+
+	mboxq->vport = vport;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	if (rc == MBX_SUCCESS) {
+		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+		rc = 0;
+	}
+
+	/*
+	 * This memory was allocated by the lpfc_read_sparam routine. Release
+	 * it to the mbuf pool.
+	 */
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mboxq->context1 = NULL;
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0382 READ_SPARAM command failed "
+				"status %d, mbxStatus x%x\n",
+				rc, bf_get(lpfc_mqe_status, mqe));
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -EIO;
+		goto out_free_mbox;
+	}
+
+	lpfc_update_vport_wwn(vport);
+
+	/* Update the fc_host data structures with new wwn. */
+	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+	/* Register SGL pool to the device using non-embedded mailbox command */
+	if (!phba->sli4_hba.extents_in_use) {
+		rc = lpfc_sli4_post_els_sgl_list(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"0582 Error %d during els sgl post "
+					"operation\n", rc);
+			rc = -ENODEV;
+			goto out_free_mbox;
+		}
+	} else {
+		rc = lpfc_sli4_post_els_sgl_list_ext(phba);
+		if (unlikely(rc)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"2560 Error %d during els sgl post "
+					"operation\n", rc);
+			rc = -ENODEV;
+			goto out_free_mbox;
+		}
+	}
+
+	/* Register SCSI SGL pool to the device */
+	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0383 Error %d during scsi sgl post "
+				"operation\n", rc);
+		/* Some Scsi buffers were moved to the abort scsi list */
+		/* A pci function reset will repost them */
+		rc = -ENODEV;
+		goto out_free_mbox;
+	}
+
+	/* Post the rpi header region to the device. */
+	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0393 Error %d during rpi post operation\n",
+				rc);
+		rc = -ENODEV;
+		goto out_free_mbox;
+	}
+	lpfc_sli4_node_prep(phba);
+
+	/* Create all the SLI4 queues */
+	rc = lpfc_sli4_queue_create(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3089 Failed to allocate queues\n");
+		rc = -ENODEV;
+		goto out_stop_timers;
+	}
+	/* Set up all the queues to the device */
+	rc = lpfc_sli4_queue_setup(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0381 Error %d during queue setup.\n ", rc);
+		goto out_destroy_queue;
+	}
+
+	/* Arm the CQs and then EQs on device */
+	lpfc_sli4_arm_cqeq_intr(phba);
+
+	/* Indicate device interrupt mode */
+	phba->sli4_hba.intr_enable = 1;
+
+	/* Allow asynchronous mailbox command to go through */
+	spin_lock_irq(&phba->hbalock);
+	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Post receive buffers to the device */
+	lpfc_sli4_rb_setup(phba);
+
+	/* Reset HBA FCF states after HBA reset */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
+	/* Start the ELS watchdog timer */
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + HZ * (phba->fc_ratov * 2));
+
+	/* Start heart beat timer */
+	mod_timer(&phba->hb_tmofunc,
+		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+	phba->hb_outstanding = 0;
+	phba->last_completion_time = jiffies;
+
+	/* Start error attention (ERATT) polling timer */
+	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
+	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
+	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+		rc = pci_enable_pcie_error_reporting(phba->pcidev);
+		if (!rc) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2829 This device supports "
+					"Advanced Error Reporting (AER)\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->hba_flag |= HBA_AER_ENABLED;
+			spin_unlock_irq(&phba->hbalock);
+		} else {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2830 This device does not support "
+					"Advanced Error Reporting (AER)\n");
+			phba->cfg_aer_support = 0;
+		}
+		rc = 0;
+	}
+
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		/*
+		 * The FC Port needs to register FCFI (index 0)
+		 */
+		lpfc_reg_fcfi(phba, mboxq);
+		mboxq->vport = phba->pport;
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS)
+			goto out_unset_queue;
+		rc = 0;
+		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
+					&mboxq->u.mqe.un.reg_fcfi);
+
+		/* Check if the port is configured to be disabled */
+		lpfc_sli_read_link_ste(phba);
+	}
+
+	/*
+	 * The port is ready, set the host's link state to LINK_DOWN
+	 * in preparation for link interrupts.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_DOWN;
+	spin_unlock_irq(&phba->hbalock);
+	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+	    (phba->hba_flag & LINK_DISABLED)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+				"3103 Adapter Link is disabled.\n");
+		lpfc_down_link(phba, mboxq);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+					"3104 Adapter failed to issue "
+					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
+			goto out_unset_queue;
+		}
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+		/* don't perform init_link on SLI4 FC port loopback test */
+		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+			if (rc)
+				goto out_unset_queue;
+		}
+	}
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+out_unset_queue:
+	/* Unset all the queues set up in this routine when error out */
+	lpfc_sli4_queue_unset(phba);
+out_destroy_queue:
+	lpfc_sli4_queue_destroy(phba);
+out_stop_timers:
+	lpfc_stop_hba_timers(phba);
+out_free_mbox:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return rc;
+}
+
+/**
+ * lpfc_mbox_timeout - Timeout call back function for mbox timer
+ * @ptr: context object - pointer to hba structure.
+ *
+ * This is the callback function for mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox completes. The function is called by
+ * the kernel timer code when a mailbox does not complete within
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
+void
+lpfc_mbox_timeout(unsigned long ptr)
+{
+	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
+	unsigned long iflag;
+	uint32_t tmo_posted;
+
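+	/* Post WORKER_MBOX_TMO at most once, then wake the worker thread. */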
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+	if (!tmo_posted)
+		phba->pport->work_port_events |= WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+	return;
+}
+
+
+/**
+ * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
+void
+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+	MAILBOX_t *mb = &pmbox->u.mb;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	/* Check the pmbox pointer first.  There is a race condition
+	 * between the mbox timeout handler getting executed in the
+	 * worklist and the mailbox actually completing. When this
+	 * race condition occurs, the mbox_active will be NULL.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	if (pmbox == NULL) {
+		lpfc_printf_log(phba, KERN_WARNING,
+				LOG_MBOX | LOG_SLI,
+				"0353 Active Mailbox cleared - mailbox timeout "
+				"exiting\n");
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* Mbox cmd <mbxCommand> timeout */
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+			mb->mbxCommand,
+			phba->pport->port_state,
+			phba->sli.sli_flag,
+			phba->sli.mbox_active);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Setting state unknown so lpfc_sli_abort_iocb_ring
+	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
+	 * it to fail all outstanding SCSI IO.
+	 */
+	spin_lock_irq(&phba->pport->work_port_lock);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irq(&phba->pport->work_port_lock);
+	spin_lock_irq(&phba->hbalock);
+	phba->link_state = LPFC_LINK_UNKNOWN;
+	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+	spin_unlock_irq(&phba->hbalock);
+
+	pring = &psli->ring[psli->fcp_ring];
+	lpfc_sli_abort_iocb_ring(phba, pring);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+			"0345 Resetting board due to mailbox timeout\n");
+
+	/* Reset the HBA device */
+	lpfc_reset_hba(phba);
+}
+
+/**
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling), the
+ * function will submit the command and return immediately without waiting
+ * for the mailbox completion. The no_wait mode is supported only when the
+ * HBA is in SLI2/SLI3 mode with interrupts enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into mailbox queue.
+ * The SLI layer owns the mailbox object until the mailbox command
+ * completes, if this function returns MBX_BUSY or MBX_SUCCESS. For all other
+ * return codes the caller owns the mailbox command after the return of
+ * the function.
+ **/
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+		       uint32_t flag)
+{
+	MAILBOX_t *mb;
+	struct lpfc_sli *psli = &phba->sli;
+	uint32_t status, evtctr;
+	uint32_t ha_copy, hc_copy;
+	int i;
+	unsigned long timeout;
+	unsigned long drvr_flag = 0;
+	uint32_t word0, ldata;
+	void __iomem *to_slim;
+	int processing_queue = 0;
+
+	spin_lock_irqsave(&phba->hbalock, drvr_flag);
+	if (!pmbox) {
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		/* processing mbox queue from intr_handler */
+		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
+		processing_queue = 1;
+		pmbox = lpfc_mbox_get(phba);
+		if (!pmbox) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			return MBX_SUCCESS;
+		}
+	}
+
+	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+		if(!pmbox->vport) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_VPORT,
+					"1806 Mbox x%x failed. No vport\n",
+					pmbox->u.mb.mbxCommand);
+			dump_stack();
+			goto out_not_finished;
+		}
+	}
+
+	/* If the PCI channel is in offline state, do not post mbox. */
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
+
+	/* If HBA has a deferred error attention, fail the iocb. */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+		goto out_not_finished;
+	}
+
+	psli = &phba->sli;
+
+	mb = &pmbox->u.mb;
+	status = MBX_SUCCESS;
+
+	if (phba->link_state == LPFC_HBA_ERROR) {
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+		/* Mbox command <mbxCommand> cannot issue */
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):0311 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
+			!(hc_copy & HC_MBINT_ENA)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2528 Mailbox command x%x cannot "
+				"issue Data: x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0,
+				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+	}
+
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		/* Polling for a mbox command when another one is already active
+		 * is not allowed in SLI. Also, the driver must have established
+		 * SLI2 mode to queue and process multiple mbox commands.
+		 */
+
+		if (flag & MBX_POLL) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2529 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2530 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+
+		/* Another mailbox command is still being processed, queue this
+		 * command to be processed later.
+		 */
+		lpfc_mbox_put(phba, pmbox);
+
+		/* Mbox cmd issue - BUSY */
+		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+				"(%d):0308 Mbox cmd issue - BUSY Data: "
+				"x%x x%x x%x x%x\n",
+				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
+				mb->mbxCommand, phba->pport->port_state,
+				psli->sli_flag, flag);
+
+		psli->slistat.mbox_busy++;
+		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+		if (pmbox->vport) {
+			lpfc_debugfs_disc_trc(pmbox->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
+				(uint32_t)mb->mbxCommand,
+				mb->un.varWords[0], mb->un.varWords[1]);
+		}
+		else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
+				(uint32_t)mb->mbxCommand,
+				mb->un.varWords[0], mb->un.varWords[1]);
+		}
+
+		return MBX_BUSY;
+	}
+
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+	/* If we are not polling, we MUST be in SLI2 mode */
+	if (flag != MBX_POLL) {
+		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
+		    (mb->mbxCommand != MBX_KILL_BOARD)) {
+			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+			/* Mbox command <mbxCommand> cannot issue */
+			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+					"(%d):2531 Mailbox command x%x "
+					"cannot issue Data: x%x x%x\n",
+					pmbox->vport ? pmbox->vport->vpi : 0,
+					pmbox->u.mb.mbxCommand,
+					psli->sli_flag, flag);
+			goto out_not_finished;
+		}
+		/* timeout active mbox command */
+		mod_timer(&psli->mbox_tmo, (jiffies +
+			       (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+	}
+
+	/* Mailbox cmd <cmd> issue */
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
+			"x%x\n",
+			pmbox->vport ? pmbox->vport->vpi : 0,
+			mb->mbxCommand, phba->pport->port_state,
+			psli->sli_flag, flag);
+
+	if (mb->mbxCommand != MBX_HEARTBEAT) {
+		if (pmbox->vport) {
+			lpfc_debugfs_disc_trc(pmbox->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Send vport: cmd:x%x mb:x%x x%x",
+				(uint32_t)mb->mbxCommand,
+				mb->un.varWords[0], mb->un.varWords[1]);
+		}
+		else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Send:       cmd:x%x mb:x%x x%x",
+				(uint32_t)mb->mbxCommand,
+				mb->un.varWords[0], mb->un.varWords[1]);
+		}
+	}
+
+	psli->slistat.mbox_cmd++;
+	evtctr = psli->slistat.mbox_event;
+
+	/* next set own bit for the adapter and copy over command word */
+	mb->mbxOwner = OWN_CHIP;
+
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= (uint8_t *)phba->mbox_ext
+				  - (uint8_t *)phba->mbox;
+		}
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_sli_pcimem_bcopy(pmbox->context2,
+				(uint8_t *)phba->mbox_ext,
+				pmbox->in_ext_byte_len);
+		}
+		/* Copy command data to host SLIM area */
+		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+	} else {
+		/* Populate mbox extension offset word. */
+		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
+			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
+				= MAILBOX_HBA_EXT_OFFSET;
+
+		/* Copy the mailbox extension data */
+		if (pmbox->in_ext_byte_len && pmbox->context2) {
+			lpfc_memcpy_to_slim(phba->MBslimaddr +
+				MAILBOX_HBA_EXT_OFFSET,
+				pmbox->context2, pmbox->in_ext_byte_len);
+
+		}
+		if (mb->mbxCommand == MBX_CONFIG_PORT) {
+			/* copy command data into host mbox for cmpl */
+			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+		}
+
+		/* First copy mbox command data to HBA SLIM, skip past first
+		   word */
+		to_slim = phba->MBslimaddr + sizeof (uint32_t);
+		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
+
+		/* Next copy over first word, with mbxOwner set */
+		ldata = *((uint32_t *)mb);
+		to_slim = phba->MBslimaddr;
+		writel(ldata, to_slim);
+		readl(to_slim); /* flush */
+
+		if (mb->mbxCommand == MBX_CONFIG_PORT) {
+			/* switch over to host mailbox */
+			psli->sli_flag |= LPFC_SLI_ACTIVE;
+		}
+	}
+
+	wmb();
+
+	switch (flag) {
+	case MBX_NOWAIT:
+		/* Set up reference to mailbox command */
+		psli->mbox_active = pmbox;
+		/* Interrupt board to do it */
+		writel(CA_MBATT, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+		/* Don't wait for it to finish, just return */
+		break;
+
+	case MBX_POLL:
+		/* Set up null reference to mailbox command */
+		psli->mbox_active = NULL;
+		/* Interrupt board to do it */
+		writel(CA_MBATT, phba->CAregaddr);
+		readl(phba->CAregaddr); /* flush */
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+			/* First read mbox status word */
+			word0 = *((uint32_t *)phba->mbox);
+			word0 = le32_to_cpu(word0);
+		} else {
+			/* First read mbox status word */
+			if (lpfc_readl(phba->MBslimaddr, &word0)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+		}
+
+		/* Read the HBA Host Attention Register */
+		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+			spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+			goto out_not_finished;
+		}
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+							1000) + jiffies;
+		i = 0;
+		/* Wait for command to complete */
+		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
+		       (!(ha_copy & HA_MBATT) &&
+			(phba->link_state > LPFC_WARM_START))) {
+			if (time_after(jiffies, timeout)) {
+				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+
+			/* Check if we took a mbox interrupt while we were
+			   polling */
+			if (((word0 & OWN_CHIP) != OWN_CHIP)
+			    && (evtctr != psli->slistat.mbox_event))
+				break;
+
+			if (i++ > 10) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				msleep(1);
+				spin_lock_irqsave(&phba->hbalock, drvr_flag);
+			}
+
+			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+				/* First copy command data */
+				word0 = *((uint32_t *)phba->mbox);
+				word0 = le32_to_cpu(word0);
+				if (mb->mbxCommand == MBX_CONFIG_PORT) {
+					MAILBOX_t *slimmb;
+					uint32_t slimword0;
+					/* Check real SLIM for any errors */
+					slimword0 = readl(phba->MBslimaddr);
+					slimmb = (MAILBOX_t *) & slimword0;
+					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
+					    && slimmb->mbxStatus) {
+						psli->sli_flag &=
+						    ~LPFC_SLI_ACTIVE;
+						word0 = slimword0;
+					}
+				}
+			} else {
+				/* First copy command data */
+				word0 = readl(phba->MBslimaddr);
+			}
+			/* Read the HBA Host Attention Register */
+			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
+				spin_unlock_irqrestore(&phba->hbalock,
+						       drvr_flag);
+				goto out_not_finished;
+			}
+		}
+
+		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+			/* copy results back to user */
+			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
+						      pmbox->context2,
+						      pmbox->out_ext_byte_len);
+			}
+		} else {
+			/* First copy command data */
+			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+							MAILBOX_CMD_SIZE);
+			/* Copy the mailbox extension data */
+			if (pmbox->out_ext_byte_len && pmbox->context2) {
+				lpfc_memcpy_from_slim(pmbox->context2,
+					phba->MBslimaddr +
+					MAILBOX_HBA_EXT_OFFSET,
+					pmbox->out_ext_byte_len);
+			}
+		}
+
+		writel(HA_MBATT, phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		status = mb->mbxStatus;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+	return status;
+
+out_not_finished:
+	if (processing_queue) {
+		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		lpfc_mbox_cmpl_put(phba, pmbox);
+	}
+	return MBX_NOT_FINISHED;
+}
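+
+/*
+ * Caller-side sketch (illustrative only) of the ownership rules described
+ * above: with MBX_NOWAIT the SLI layer keeps the mailbox on
+ * MBX_BUSY/MBX_SUCCESS and the completion handler releases it, so a caller
+ * typically frees it only on other return codes:
+ *
+ *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
+ *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
+ *		mempool_free(pmbox, phba->mbox_mem_pool);
+ *
+ * With MBX_POLL the command has already completed (or failed) when this
+ * function returns, so the caller can inspect pmbox->u.mb.mbxStatus and
+ * free the mailbox itself, as done earlier in this file after issuing
+ * DOWN_LINK with MBX_POLL.
+ */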
+
+/**
+ * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function blocks the posting of SLI4 asynchronous mailbox commands from
+ * the driver internal pending mailbox queue. It will then try to wait out the
+ * possible outstanding mailbox command before returning.
+ *
+ * Returns:
+ * 	0 - the outstanding mailbox command completed; otherwise, the wait for
+ * 	the outstanding mailbox command timed out.
+ **/
+static int
+lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	int rc = 0;
+	unsigned long timeout = 0;
+
+	/* Mark the asynchronous mailbox command posting as blocked */
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+	/* Determine how long we might wait for the active mailbox
+	 * command to be gracefully completed by firmware.
+	 */
+	if (phba->sli.mbox_active)
+		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+						phba->sli.mbox_active) *
+						1000) + jiffies;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Wait for the outstanding mailbox command to complete */
+	while (phba->sli.mbox_active) {
+		/* Check active mailbox complete status every 2ms */
+		msleep(2);
+		if (time_after(jiffies, timeout)) {
+			/* Timeout, mark the outstanding cmd as not complete */
+			rc = 1;
+			break;
+		}
+	}
+
+	/* Cannot cleanly block async mailbox command posting, fail it */
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function unblocks and resumes posting of SLI4 asynchronous mailbox
+ * commands from the driver internal pending mailbox queue. It makes sure
+ * that there is no outstanding mailbox command before resuming posting
+ * asynchronous mailbox commands. If, for any reason, there is an outstanding
+ * mailbox command, it will try to wait it out before resuming asynchronous
+ * mailbox command posting.
+ **/
+static void
+lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+
+	spin_lock_irq(&phba->hbalock);
+	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+		/* Asynchronous mailbox posting is not blocked, do nothing */
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
+
+	/* The outstanding synchronous mailbox command is guaranteed to be
+	 * done, whether it completed successfully or timed out. A timed-out
+	 * command is always removed, so just unblock posting of async
+	 * mailbox commands and resume.
+	 */
+	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+	spin_unlock_irq(&phba->hbalock);
+
+	/* wake up worker thread to post asynchronous mailbox command */
+	lpfc_worker_wake_up(phba);
+}
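+
+/*
+ * Illustrative pairing of the two helpers above; this is the pattern
+ * lpfc_sli_issue_mbox_s4() uses further below when a synchronous command
+ * has to jump ahead of the asynchronous mailbox queue:
+ *
+ *	if (!lpfc_sli4_async_mbox_block(phba)) {
+ *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ *		lpfc_sli4_async_mbox_unblock(phba);
+ *	}
+ */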
+
+/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port.  The mailbox is expected
+ * to be completely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ *	MBX_SUCCESS - mailbox posted successfully
+ *	Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	int rc = MBX_SUCCESS;
+	unsigned long iflag;
+	uint32_t db_ready;
+	uint32_t mcqe_status;
+	uint32_t mbx_cmnd;
+	unsigned long timeout;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_mqe *mb = &mboxq->u.mqe;
+	struct lpfc_bmbx_create *mbox_rgn;
+	struct dma_address *dma_address;
+	struct lpfc_register bmbx_reg;
+
+	/*
+	 * Only one mailbox can be active to the bootstrap mailbox region
+	 * at a time and there is no queueing provided.
+	 */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2532 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_POLL);
+		return MBXERR_ERROR;
+	}
+	/* The server grabs the token and owns it until release */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/*
+	 * Initialize the bootstrap memory region to avoid stale data areas
+	 * in the mailbox post.  Then copy the caller's mailbox contents to
+	 * the bmbx mailbox region.
+	 */
+	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+			      sizeof(struct lpfc_mqe));
+
+	/* Post the high mailbox dma address to the port and wait for ready. */
+	dma_address = &phba->sli4_hba.bmbx.dma_address;
+	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
+				   * 1000) + jiffies;
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout)) {
+			rc = MBXERR_ERROR;
+			goto exit;
+		}
+	} while (!db_ready);
+
+	/* Post the low mailbox dma address to the port. */
+	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
+				   * 1000) + jiffies;
+	do {
+		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+		if (!db_ready)
+			msleep(2);
+
+		if (time_after(jiffies, timeout)) {
+			rc = MBXERR_ERROR;
+			goto exit;
+		}
+	} while (!db_ready);
+
+	/*
+	 * Read the CQ to ensure the mailbox has completed.
+	 * If so, update the mailbox status so that the upper layers
+	 * can complete the request normally.
+	 */
+	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+			      sizeof(struct lpfc_mqe));
+	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+			      sizeof(struct lpfc_mcqe));
+	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+	/*
+	 * When the CQE status indicates a failure and the mailbox status
+	 * indicates success then copy the CQE status into the mailbox status
+	 * (and prefix it with x4000).
+	 */
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
+			bf_set(lpfc_mqe_status, mb,
+			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+		rc = MBXERR_ERROR;
+	} else
+		lpfc_sli4_swap_str(phba, mboxq);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
+			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+			" x%x x%x CQ: x%x x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			bf_get(lpfc_mqe_status, mb),
+			mb->un.mb_words[0], mb->un.mb_words[1],
+			mb->un.mb_words[2], mb->un.mb_words[3],
+			mb->un.mb_words[4], mb->un.mb_words[5],
+			mb->un.mb_words[6], mb->un.mb_words[7],
+			mb->un.mb_words[8], mb->un.mb_words[9],
+			mb->un.mb_words[10], mb->un.mb_words[11],
+			mb->un.mb_words[12], mboxq->mcqe.word0,
+			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
+			mboxq->mcqe.trailer);
+exit:
+	/* We are holding the token, no need for lock when releasing */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return rc;
+}
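+
+/*
+ * Summary of the bootstrap (BMBX) handshake performed above:
+ *  1. Take the mailbox token under hbalock (only one bootstrap command
+ *     may be outstanding, there is no queueing).
+ *  2. Copy the caller's MQE into the bootstrap DMA region.
+ *  3. Write the high half of the bootstrap DMA address to the BMBX
+ *     doorbell and poll the ready bit, then repeat with the low half.
+ *  4. Copy the MQE and MCQE back; if the MCQE reports a failure while
+ *     the MQE status claims success, fold the MCQE status into the MQE
+ *     status prefixed with LPFC_MBX_ERROR_RANGE.
+ *  5. Release the token under hbalock.
+ */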
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+		       uint32_t flag)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	unsigned long iflags;
+	int rc;
+
+	/* dump from issue mailbox command if setup */
+	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2544 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	/* Detect polling mode and jump to a handler */
+	if (!phba->sli4_hba.intr_enable) {
+		if (flag == MBX_POLL)
+			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+		else
+			rc = -EIO;
+		if (rc != MBX_SUCCESS)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+					"(%d):2541 Mailbox command x%x "
+					"(x%x/x%x) cannot issue Data: "
+					"x%x x%x\n",
+					mboxq->vport ? mboxq->vport->vpi : 0,
+					mboxq->u.mb.mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									mboxq),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									mboxq),
+					psli->sli_flag, flag);
+		return rc;
+	} else if (flag == MBX_POLL) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"(%d):2542 Try to issue mailbox command "
+				"x%x (x%x/x%x) synchronously ahead of async"
+				"mailbox command queue: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		/* Try to block the asynchronous mailbox posting */
+		rc = lpfc_sli4_async_mbox_block(phba);
+		if (!rc) {
+			/* Successfully blocked, now issue sync mbox cmd */
+			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+			if (rc != MBX_SUCCESS)
+				lpfc_printf_log(phba, KERN_ERR,
+					LOG_MBOX | LOG_SLI,
+					"(%d):2597 Mailbox command "
+					"x%x (x%x/x%x) cannot issue "
+					"Data: x%x x%x\n",
+					mboxq->vport ?
+					mboxq->vport->vpi : 0,
+					mboxq->u.mb.mbxCommand,
+					lpfc_sli_config_mbox_subsys_get(phba,
+									mboxq),
+					lpfc_sli_config_mbox_opcode_get(phba,
+									mboxq),
+					psli->sli_flag, flag);
+			/* Unblock the async mailbox posting afterward */
+			lpfc_sli4_async_mbox_unblock(phba);
+		}
+		return rc;
+	}
+
+	/* Now, interrupt mode asynchronous mailbox command */
+	rc = lpfc_mbox_cmd_check(phba, mboxq);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2543 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
+	/* Put the mailbox command to the driver internal FIFO */
+	psli->slistat.mbox_busy++;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	lpfc_mbox_put(phba, mboxq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0354 Mbox cmd issue - Enqueue Data: "
+			"x%x (x%x/x%x) x%x x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state,
+			psli->sli_flag, MBX_NOWAIT);
+	/* Wake up worker thread to transport mailbox command from head */
+	lpfc_worker_wake_up(phba);
+
+	return MBX_BUSY;
+
+out_not_finished:
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *mboxq;
+	int rc = MBX_SUCCESS;
+	unsigned long iflags;
+	struct lpfc_mqe *mqe;
+	uint32_t mbx_cmnd;
+
+	/* Check interrupt mode before posting async mailbox command */
+	if (unlikely(!phba->sli4_hba.intr_enable))
+		return MBX_NOT_FINISHED;
+
+	/* Check for mailbox command service token */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_NOT_FINISHED;
+	}
+	if (unlikely(phba->sli.mbox_active)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"0384 There is pending active mailbox cmd\n");
+		return MBX_NOT_FINISHED;
+	}
+	/* Take the mailbox command service token */
+	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+	/* Get the next mailbox command from head of queue */
+	mboxq = lpfc_mbox_get(phba);
+
+	/* If no more mailbox commands are waiting for post, we're done */
+	if (!mboxq) {
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		return MBX_SUCCESS;
+	}
+	phba->sli.mbox_active = mboxq;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check device readiness for posting mailbox command */
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc))
+		/* Driver clean routine will clean up pending mailbox */
+		goto out_not_finished;
+
+	/* Prepare the mbox command to be posted */
+	mqe = &mboxq->u.mqe;
+	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+	/* Start timer for the mbox_tmo and log some mailbox post messages */
+	mod_timer(&psli->mbox_tmo, (jiffies +
+		  (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
+			"x%x x%x\n",
+			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+			phba->pport->port_state, psli->sli_flag);
+
+	if (mbx_cmnd != MBX_HEARTBEAT) {
+		if (mboxq->vport) {
+			lpfc_debugfs_disc_trc(mboxq->vport,
+				LPFC_DISC_TRC_MBOX_VPORT,
+				"MBOX Send vport: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		} else {
+			lpfc_debugfs_disc_trc(phba->pport,
+				LPFC_DISC_TRC_MBOX,
+				"MBOX Send: cmd:x%x mb:x%x x%x",
+				mbx_cmnd, mqe->un.mb_words[0],
+				mqe->un.mb_words[1]);
+		}
+	}
+	psli->slistat.mbox_cmd++;
+
+	/* Post the mailbox command to the port */
+	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2533 Mailbox command x%x (x%x/x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, MBX_NOWAIT);
+		goto out_not_finished;
+	}
+
+	return rc;
+
+out_not_finished:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (phba->sli.mbox_active) {
+		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		__lpfc_mbox_cmpl_put(phba, mboxq);
+		/* Release the token */
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+		phba->sli.mbox_active = NULL;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
+ * the API jump table function pointer in the lpfc_hba struct.
+ *
+ * Return codes: the caller owns the mailbox command after the return of the
+ * function.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
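+
+/*
+ * Note (descriptive only): phba->lpfc_sli_issue_mbox is one of the
+ * per-device function pointers filled in by lpfc_mbox_api_table_setup()
+ * below, so a single call such as
+ *
+ *	rc = lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
+ *
+ * dispatches to lpfc_sli_issue_mbox_s3() on LPFC_PCI_DEV_LP (SLI-3) or
+ * to lpfc_sli_issue_mbox_s4() on LPFC_PCI_DEV_OC (SLI-4).
+ */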
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s3;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+		phba->lpfc_sli_handle_slow_ring_event =
+				lpfc_sli_handle_slow_ring_event_s4;
+		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1420 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * __lpfc_sli_ringtx_put - Add an iocb to the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held to add a command
+ * iocb to the txq when SLI layer cannot submit the command iocb
+ * to the ring.
+ **/
+void
+__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		    struct lpfc_iocbq *piocb)
+{
+	/* Insert the caller's iocb in the txq tail for later processing. */
+	list_add_tail(&piocb->list, &pring->txq);
+	pring->txq_cnt++;
+}
+
+/**
+ * lpfc_sli_next_iocb - Get the next iocb in the txq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held before a new
+ * iocb is submitted to the firmware. It checks the txq so that
+ * iocbs already queued there are flushed to the firmware before
+ * new iocbs are submitted.
+ * If there are iocbs in the txq which need to be submitted
+ * to firmware, lpfc_sli_next_iocb returns the first element
+ * of the txq after dequeuing it from the txq.
+ * If there is no iocb in the txq, the function returns
+ * *piocb and sets *piocb to NULL. The caller needs to check
+ * *piocb to find whether there are more commands to submit.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		   struct lpfc_iocbq **piocb)
+{
+	struct lpfc_iocbq * nextiocb;
+
+	nextiocb = lpfc_sli_ringtx_get(phba, pring);
+	if (!nextiocb) {
+		nextiocb = *piocb;
+		*piocb = NULL;
+	}
+
+	return nextiocb;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * the next available slot in the command ring, posts the command to that
+ * slot, and writes the port attention register to request that the HBA start
+ * processing the new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
+ * otherwise the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to
+ * the txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_iocbq *nextiocb;
+	IOCB_t *iocb;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+
+	if (piocb->iocb_cmpl && (!piocb->vport) &&
+	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+		lpfc_printf_log(phba, KERN_ERR,
+				LOG_SLI | LOG_VPORT,
+				"1807 IOCB x%x failed. No vport\n",
+				piocb->iocb.ulpCommand);
+		dump_stack();
+		return IOCB_ERROR;
+	}
+
+
+	/* If the PCI channel is in offline state, do not post iocbs. */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return IOCB_ERROR;
+
+	/* If HBA has a deferred error attention, fail the iocb. */
+	if (unlikely(phba->hba_flag & DEFER_ERATT))
+		return IOCB_ERROR;
+
+	/*
+	 * We should never get an IOCB if we are in a < LINK_DOWN state
+	 */
+	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		return IOCB_ERROR;
+
+	/*
+	 * Check to see if we are blocking IOCB processing because of an
+	 * outstanding event.
+	 */
+	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
+		goto iocb_busy;
+
+	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
+		/*
+		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
+		 * can be issued if the link is not up.
+		 */
+		switch (piocb->iocb.ulpCommand) {
+		case CMD_GEN_REQUEST64_CR:
+		case CMD_GEN_REQUEST64_CX:
+			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+					FC_RCTL_DD_UNSOL_CMD) ||
+				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
+					MENLO_TRANSPORT_TYPE))
+
+				goto iocb_busy;
+			break;
+		case CMD_QUE_RING_BUF_CN:
+		case CMD_QUE_RING_BUF64_CN:
+			/*
+			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
+			 * completion, iocb_cmpl MUST be 0.
+			 */
+			if (piocb->iocb_cmpl)
+				piocb->iocb_cmpl = NULL;
+			/*FALLTHROUGH*/
+		case CMD_CREATE_XRI_CR:
+		case CMD_CLOSE_XRI_CN:
+		case CMD_CLOSE_XRI_CX:
+			break;
+		default:
+			goto iocb_busy;
+		}
+
+	/*
+	 * For FCP commands, we must be in a state where we can process link
+	 * attention events.
+	 */
+	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
+			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
+		goto iocb_busy;
+	}
+
+	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
+	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
+		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
+
+	if (iocb)
+		lpfc_sli_update_ring(phba, pring);
+	else
+		lpfc_sli_update_full_ring(phba, pring);
+
+	if (!piocb)
+		return IOCB_SUCCESS;
+
+	goto out_busy;
+
+ iocb_busy:
+	pring->stats.iocb_cmd_delay++;
+
+ out_busy:
+
+	if (!(flag & SLI_IOCB_RET_IOCB)) {
+		__lpfc_sli_ringtx_put(phba, pring, piocb);
+		return IOCB_SUCCESS;
+	}
+
+	return IOCB_BUSY;
+}
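+
+/*
+ * Note on the submit loop above (descriptive only): iocbs already queued
+ * on pring->txq are drained ahead of the caller's iocb, and
+ * lpfc_sli_next_iocb() hands back the caller's iocb (clearing *piocb)
+ * only once the txq is empty. A non-NULL piocb after the loop therefore
+ * means no ring slot was left for it, and the iocb is either queued to
+ * the txq (IOCB_SUCCESS) or rejected with IOCB_BUSY, depending on
+ * SLI_IOCB_RET_IOCB.
+ */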
+
+/**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli_sge.
+ * The IOCB is still in cpu endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ **/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+		struct lpfc_sglq *sglq)
+{
+	uint16_t xritag = NO_XRI;
+	struct ulp_bde64 *bpl = NULL;
+	struct ulp_bde64 bde;
+	struct sli4_sge *sgl  = NULL;
+	struct lpfc_dmabuf *dmabuf;
+	IOCB_t *icmd;
+	int numBdes = 0;
+	int i = 0;
+	uint32_t offset = 0; /* accumulated offset in the sg request list */
+	int inbound = 0; /* number of sg reply entries inbound from firmware */
+
+	if (!piocbq || !sglq)
+		return xritag;
+
+	sgl  = (struct sli4_sge *)sglq->sgl;
+	icmd = &piocbq->iocb;
+	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+		return sglq->sli4_xritag;
+	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = icmd->un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		/* The addrHigh and addrLow fields within the IOCB
+		 * have not been byteswapped yet so there is no
+		 * need to swap them back.
+		 */
+		if (piocbq->context3)
+			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+		else
+			return xritag;
+
+		bpl  = (struct ulp_bde64 *)dmabuf->virt;
+		if (!bpl)
+			return xritag;
+
+		for (i = 0; i < numBdes; i++) {
+			/* Should already be byte swapped. */
+			sgl->addr_hi = bpl->addrHigh;
+			sgl->addr_lo = bpl->addrLow;
+
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			if ((i+1) == numBdes)
+				bf_set(lpfc_sli4_sge_last, sgl, 1);
+			else
+				bf_set(lpfc_sli4_sge_last, sgl, 0);
+			/* swap the size field back to the cpu so we
+			 * can assign it to the sgl.
+			 */
+			bde.tus.w = le32_to_cpu(bpl->tus.w);
+			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+			/* The offsets in the sgl need to be accumulated
+			 * separately for the request and reply lists.
+			 * The request is always first, the reply follows.
+			 */
+			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+				/* add up the reply sg entries */
+				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+					inbound++;
+				/* first inbound? reset the offset */
+				if (inbound == 1)
+					offset = 0;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				bf_set(lpfc_sli4_sge_type, sgl,
+					LPFC_SGE_TYPE_DATA);
+				offset += bde.tus.f.bdeSize;
+			}
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			bpl++;
+			sgl++;
+		}
+	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+			/* The addrHigh and addrLow fields of the BDE have not
+			 * been byteswapped yet so they need to be swapped
+			 * before putting them in the sgl.
+			 */
+			sgl->addr_hi =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+			sgl->addr_lo =
+				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+			sgl->word2 = le32_to_cpu(sgl->word2);
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			sgl->word2 = cpu_to_le32(sgl->word2);
+			sgl->sge_len =
+				cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
+	}
+	return sglq->sli4_xritag;
+}
+
+/**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
+ * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ *
+ * Return: index into the SLI4 fast-path FCP queue array.
+ **/
+static uint32_t
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
+{
+	++phba->fcp_qidx;
+	if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
+		phba->fcp_qidx = 0;
+
+	return phba->fcp_qidx;
+}
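+
+/*
+ * Illustrative behaviour: starting from fcp_qidx == 0 with
+ * cfg_fcp_wq_count == 4, successive calls return 1, 2, 3, 0, 1, ... so
+ * FCP commands are spread round-robin across the fast-path work queues.
+ * The caller holds hbalock, which serializes the increment.
+ */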
+
+/**
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
+ *
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
+ *
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
+ **/
+static int
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+		union lpfc_wqe *wqe)
+{
+	uint32_t xmit_len = 0, total_len = 0;
+	uint8_t ct = 0;
+	uint32_t fip;
+	uint32_t abort_tag;
+	uint8_t command_type = ELS_COMMAND_NON_FIP;
+	uint8_t cmnd;
+	uint16_t xritag;
+	uint16_t abrt_iotag;
+	struct lpfc_iocbq *abrtiocbq;
+	struct ulp_bde64 *bpl = NULL;
+	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
+	int numBdes, i;
+	struct ulp_bde64 bde;
+	struct lpfc_nodelist *ndlp;
+	uint32_t *pcmd;
+	uint32_t if_type;
+
+	fip = phba->hba_flag & HBA_FIP_SUPPORT;
+	/* The fcp commands will set command type */
+	if (iocbq->iocb_flag &  LPFC_IO_FCP)
+		command_type = FCP_COMMAND;
+	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
+		command_type = ELS_COMMAND_FIP;
+	else
+		command_type = ELS_COMMAND_NON_FIP;
+
+	/* Some of the fields are in the right position already */
+	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+	abort_tag = (uint32_t) iocbq->iotag;
+	xritag = iocbq->sli4_xritag;
+	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+	/* words0-2 bpl convert bde */
+	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+				sizeof(struct ulp_bde64);
+		bpl  = (struct ulp_bde64 *)
+			((struct lpfc_dmabuf *)iocbq->context3)->virt;
+		if (!bpl)
+			return IOCB_ERROR;
+
+		/* Should already be byte swapped. */
+		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
+		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
+		/* swap the size field back to the cpu so we
+		 * can assign it to the sgl.
+		 */
+		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
+		xmit_len = wqe->generic.bde.tus.f.bdeSize;
+		total_len = 0;
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
+			total_len += bde.tus.f.bdeSize;
+		}
+	} else
+		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
+
+	iocbq->iocb.ulpIoTag = iocbq->iotag;
+	cmnd = iocbq->iocb.ulpCommand;
+
+	switch (iocbq->iocb.ulpCommand) {
+	case CMD_ELS_REQUEST64_CR:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		if (!iocbq->iocb.ulpLe) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2007 Only Limited Edition cmd Format"
+				" supported 0x%x\n",
+				iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+
+		wqe->els_req.payload_len = xmit_len;
+		/* Els_request64 has a TMO */
+		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+			iocbq->iocb.ulpTimeout);
+		/* Need a VF for word 4, set the vf bit */
+		bf_set(els_req64_vf, &wqe->els_req, 0);
+		/* And a VFID for word 12 */
+		bf_set(els_req64_vfid, &wqe->els_req, 0);
+		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+		       iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
+		/* CCP CCPE PV PRI in word10 were set in the memcpy */
+		if (command_type == ELS_COMMAND_FIP)
+			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
+					>> LPFC_FIP_ELS_ID_SHIFT);
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+					iocbq->context2)->virt);
+		if_type = bf_get(lpfc_sli_intf_if_type,
+					&phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+				*pcmd == ELS_CMD_SCR ||
+				*pcmd == ELS_CMD_FDISC ||
+				*pcmd == ELS_CMD_LOGO ||
+				*pcmd == ELS_CMD_PLOGI)) {
+				bf_set(els_req64_sp, &wqe->els_req, 1);
+				bf_set(els_req64_sid, &wqe->els_req,
+					iocbq->vport->fc_myDID);
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+					phba->vpi_ids[phba->pport->vpi]);
+			} else if (pcmd && iocbq->context1) {
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+			}
+		}
+		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+		break;
+	case CMD_XMIT_SEQUENCE64_CX:
+		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.un.ulpWord[3]);
+		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.unsli3.rcvsli3.ox_id);
+		/* The entire sequence is transmitted for this IOCB */
+		xmit_len = total_len;
+		cmnd = CMD_XMIT_SEQUENCE64_CR;
+		if (phba->link_flag & LS_LOOPBACK_MODE)
+			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
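+		/*FALLTHROUGH*/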
+	case CMD_XMIT_SEQUENCE64_CR:
+		/* word3 iocb=io_tag32 wqe=reserved */
+		wqe->xmit_sequence.rsvd3 = 0;
+		/* word4 relative_offset memcpy */
+		/* word5 r_ctl/df_ctl memcpy */
+		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+		wqe->xmit_sequence.xmit_len = xmit_len;
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_BCAST64_CN:
+		/* word3 iocb=iotag32 wqe=seq_payload_len */
+		wqe->xmit_bcast64.seq_payload_len = xmit_len;
+		/* word4 iocb=rsvd wqe=rsvd */
+		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
+		break;
+	case CMD_FCP_IWRITE64_CR:
+		command_type = FCP_COMMAND_DATA_OUT;
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		wqe->fcp_iwrite.payload_offset_len =
+			xmit_len + sizeof(struct fcp_rsp);
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+		break;
+	case CMD_FCP_IREAD64_CR:
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		wqe->fcp_iread.payload_offset_len =
+			xmit_len + sizeof(struct fcp_rsp);
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+		break;
+	case CMD_FCP_ICMND64_CR:
+		/* word3 iocb=IO_TAG wqe=reserved */
+		wqe->fcp_icmd.rsrvd3 = 0;
+		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
+		break;
+	case CMD_GEN_REQUEST64_CR:
+		/* For this command calculate the xmit length of the
+		 * request bde.
+		 */
+		xmit_len = 0;
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+			sizeof(struct ulp_bde64);
+		for (i = 0; i < numBdes; i++) {
+			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+				break;
+			xmit_len += bde.tus.f.bdeSize;
+		}
+		/* word3 iocb=IO_TAG wqe=request_payload_len */
+		wqe->gen_req.request_payload_len = xmit_len;
+		/* word4 iocb=parameter wqe=relative_offset memcpy */
+		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word6 context tag copied in memcpy */
+		if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
+			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2015 Invalid CT %x command 0x%x\n",
+				ct, iocbq->iocb.ulpCommand);
+			return IOCB_ERROR;
+		}
+		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_XMIT_ELS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		/* words0-2 BDE memcpy */
+		/* word3 iocb=iotag32 wqe=response_payload_len */
+		wqe->xmit_els_rsp.response_payload_len = xmit_len;
+		/* word4 iocb=did wqe=rsvd. */
+		wqe->xmit_els_rsp.rsvd4 = 0;
+		/* word5 iocb=rsvd wqe=did */
+		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+			 iocbq->iocb.un.elsreq64.remoteID);
+		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+		       iocbq->iocb.unsli3.rcvsli3.ox_id);
+		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+			       phba->vpi_ids[iocbq->vport->vpi]);
+		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+					iocbq->context2)->virt);
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+				bf_set(els_req64_sp, &wqe->els_req, 1);
+				bf_set(els_req64_sid, &wqe->els_req,
+					iocbq->vport->fc_myDID);
+				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+					phba->vpi_ids[phba->pport->vpi]);
+		}
+		command_type = OTHER_COMMAND;
+		break;
+	case CMD_CLOSE_XRI_CN:
+	case CMD_ABORT_XRI_CN:
+	case CMD_ABORT_XRI_CX:
+		/* words 0-2 memcpy should be 0 (reserved) */
+		/* port will send abts */
+		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
+		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
+			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
+			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
+		} else
+			fip = 0;
+
+		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
+			/*
+			 * The link is down, or the command was ELS_FIP
+			 * so the fw does not need to send abts
+			 * on the wire.
+			 */
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+		else
+			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+		wqe->abort_cmd.rsrvd5 = 0;
+		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
+			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+		/*
+		 * The abort handler will send us CMD_ABORT_XRI_CN or
+		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+		 */
+		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		cmnd = CMD_ABORT_XRI_CX;
+		command_type = OTHER_COMMAND;
+		xritag = 0;
+		break;
+	case CMD_XMIT_BLS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
+		/* As BLS ABTS RSP WQE is very different from other WQEs,
+		 * we re-construct this WQE here based on information in
+		 * iocbq from scratch.
+		 */
+		memset(wqe, 0, sizeof(union lpfc_wqe));
+		/* OX_ID is the same regardless of who sent ABTS to CT exchange */
+		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
+		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
+		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
+		    LPFC_ABTS_UNSOL_INT) {
+			/* ABTS sent by initiator to CT exchange, the
+			 * RX_ID field will be filled with the newly
+			 * allocated responder XRI.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       iocbq->sli4_xritag);
+		} else {
+			/* ABTS sent by responder to CT exchange, the
+			 * RX_ID field will be filled with the responder
+			 * RX_ID from ABTS.
+			 */
+			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
+		}
+		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+		/* Use CT=VPI */
+		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+			ndlp->nlp_DID);
+		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+			iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+			phba->vpi_ids[phba->pport->vpi]);
+		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		/* Overwrite the pre-set command type with OTHER_COMMAND */
+		command_type = OTHER_COMMAND;
+		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
+			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
+			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
+			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
+		}
+
+		break;
+	case CMD_XRI_ABORTED_CX:
+	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2014 Invalid command 0x%x\n",
+				iocbq->iocb.ulpCommand);
+		return IOCB_ERROR;
+		break;
+	}
+
+	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+	wqe->generic.wqe_com.abort_tag = abort_tag;
+	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function returns success
+ * after it successfully submits the iocb to the firmware or after adding it
+ * to the txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+			 struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	struct lpfc_sglq *sglq;
+	union lpfc_wqe wqe;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+
+	if (piocb->sli4_xritag == NO_XRI) {
+		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+			sglq = NULL;
+		else {
+			if (pring->txq_cnt) {
+				if (!(flag & SLI_IOCB_RET_IOCB)) {
+					__lpfc_sli_ringtx_put(phba,
+						pring, piocb);
+					return IOCB_SUCCESS;
+				} else {
+					return IOCB_BUSY;
+				}
+			} else {
+				sglq = __lpfc_sli_get_sglq(phba, piocb);
+				if (!sglq) {
+					if (!(flag & SLI_IOCB_RET_IOCB)) {
+						__lpfc_sli_ringtx_put(phba,
+								pring,
+								piocb);
+						return IOCB_SUCCESS;
+					} else
+						return IOCB_BUSY;
+				}
+			}
+		}
+	} else if (piocb->iocb_flag &  LPFC_IO_FCP) {
+		/* These IO's already have an XRI and a mapped sgl. */
+		sglq = NULL;
+	} else {
+		/*
+		 * This is a continuation of a command (CX), so this
+		 * sglq is on the active list.
+		 */
+		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+		if (!sglq)
+			return IOCB_ERROR;
+	}
+
+	if (sglq) {
+		piocb->sli4_lxritag = sglq->sli4_lxritag;
+		piocb->sli4_xritag = sglq->sli4_xritag;
+		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+			return IOCB_ERROR;
+	}
+
+	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+		return IOCB_ERROR;
+
+	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+		/*
+		 * For an FCP command IOCB, get a new WQ index to distribute
+		 * WQEs across the WQs. An abort IOCB, on the other hand,
+		 * carries the same WQ index as the original command
+		 * IOCB.
+		 */
+		if (piocb->iocb_flag & LPFC_IO_FCP)
+			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+		if (unlikely(!phba->sli4_hba.fcp_wq))
+			return IOCB_ERROR;
+		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
+				     &wqe))
+			return IOCB_ERROR;
+	} else {
+		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			return IOCB_ERROR;
+	}
+	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+	return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ *
+ * This routine calls the device-specific lockless IOCB issue routine through
+ * the function pointer stored in the lpfc_hba struct.
+ *
+ * Return codes:
+ * 	IOCB_ERROR - Error
+ * 	IOCB_SUCCESS - Success
+ * 	IOCB_BUSY - Busy
+ **/
+int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+	switch (dev_grp) {
+	case LPFC_PCI_DEV_LP:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+		break;
+	case LPFC_PCI_DEV_OC:
+		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"1419 Invalid HBA PCI-device group: 0x%x\n",
+				dev_grp);
+		return -ENODEV;
+		break;
+	}
+	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+	return 0;
+}
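+
+/*
+ * Illustrative sketch, not part of the driver: the jump table installed
+ * above is what keeps the generic wrappers device-agnostic.  A probe-time
+ * caller (hypothetical; dev_grp assumed to be derived from the PCI device
+ * ID) might look like:
+ *
+ *	if (lpfc_sli_api_table_setup(phba, dev_grp))
+ *		return -ENODEV;
+ *
+ * After this, __lpfc_sli_issue_iocb() dispatches to either the SLI-3 or the
+ * SLI-4 routine without callers having to know which one was installed.
+ */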
+
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
+ * function. It acquires the hbalock, calls __lpfc_sli_issue_iocb and
+ * returns whatever __lpfc_sli_issue_iocb returns. This wrapper is used
+ * by functions which do not hold the hbalock.
+ **/
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb, uint32_t flag)
+{
+	unsigned long iflags;
+	int rc;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return rc;
+}
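+
+/*
+ * Illustrative sketch, not part of the driver: a caller that does not hold
+ * the hbalock issues through this locking wrapper, for example (elsiocb and
+ * its cleanup are assumptions for illustration only):
+ *
+ *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ *	if (rc == IOCB_ERROR)
+ *		lpfc_sli_release_iocbq(phba, elsiocb);
+ *
+ * Code paths that already hold the hbalock must call __lpfc_sli_issue_iocb()
+ * directly to avoid deadlocking on the lock.
+ */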
+
+/**
+ * lpfc_extra_ring_setup - Extra ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while the driver attaches to the
+ * HBA to set up the extra ring. The extra ring is used
+ * only when the driver needs to support target mode or
+ * IP over FC functionality.
+ *
+ * This function is called with no lock held.
+ **/
+static int
+lpfc_extra_ring_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+
+	psli = &phba->sli;
+
+	/* Adjust cmd/rsp ring iocb entries more evenly */
+
+	/* Take some away from the FCP ring */
+	pring = &psli->ring[psli->fcp_ring];
+	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+	/* and give them to the extra ring */
+	pring = &psli->ring[psli->extra_ring];
+
+	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+	/* Setup default profile for this ring */
+	pring->iotag_max = 4096;
+	pring->num_mask = 1;
+	pring->prt[0].profile = 0;      /* Mask 0 */
+	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
+	pring->prt[0].type = phba->cfg_multi_ring_type;
+	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
+	return 0;
+}
+
+/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ *
+ * The driver calls this routine in response to an XRI ABORT CQE
+ * event from the port.  In this event, the driver is required to
+ * recover its login to the rport even though its login may be valid
+ * from the driver's perspective.  The failed ABTS notice from the
+ * port indicates the rport is not responding.
+ */
+static void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+			   struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+	unsigned long flags = 0;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba = vport->phba;
+	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_log(phba, KERN_INFO,
+			LOG_SLI, "3093 No rport recovery needed. "
+			"rport in state 0x%x\n",
+			ndlp->nlp_state);
+		return;
+	}
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"3094 Start rport recovery on shost id 0x%x "
+			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+			"flags 0x%x\n",
+			shost->host_no, ndlp->nlp_DID,
+			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+			ndlp->nlp_flag);
+	/*
+	 * The rport is not responding.  Don't attempt ADISC recovery.
+	 * Remove the FCP-2 flag to force a PLOGI.
+	 */
+	spin_lock_irqsave(shost->host_lock, flags);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	lpfc_disc_state_machine(vport, ndlp, NULL,
+				NLP_EVT_DEVICE_RECOVERY);
+	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+	spin_lock_irqsave(shost->host_lock, flags);
+	ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	lpfc_disc_start(vport);
+}
+
+/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port.  The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession.  The abort could be originated by the
+ * driver or by the port.  The ABTS could have been for an ELS
+ * or FCP IO.  The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+			  struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+	uint16_t rpi = 0, vpi = 0;
+	struct lpfc_vport *vport = NULL;
+
+	/* The rpi in the ulpContext is vport-sensitive. */
+	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+	rpi = iocbq->iocb.ulpContext;
+
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"3092 Port generated ABTS async event "
+			"on vpi %d rpi %d status 0x%x\n",
+			vpi, rpi, iocbq->iocb.ulpStatus);
+
+	vport = lpfc_find_vport_by_vpid(phba, vpi);
+	if (!vport)
+		goto err_exit;
+	ndlp = lpfc_findnode_rpi(vport, rpi);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		goto err_exit;
+
+	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+		lpfc_sli_abts_recover_port(vport, ndlp);
+	return;
+
+ err_exit:
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3095 Event Context not found, no "
+			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+			vpi, rpi, iocbq->iocb.ulpStatus,
+			iocbq->iocb.un.ulpWord[4]);
+}
+
+/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port.  The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply.  The abort could be originated
+ * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+			   struct lpfc_nodelist *ndlp,
+			   struct sli4_wcqe_xri_aborted *axri)
+{
+	struct lpfc_vport *vport;
+	uint32_t ext_status = 0;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3115 Node Context not found, driver "
+				"ignoring abts err event\n");
+		return;
+	}
+
+	vport = ndlp->vport;
+	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+			"3116 Port generated FCP XRI ABORT event on "
+			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
+			ndlp->vport->vpi, ndlp->nlp_rpi,
+			bf_get(lpfc_wcqe_xa_xri, axri),
+			bf_get(lpfc_wcqe_xa_status, axri),
+			axri->parameter);
+
+	/*
+	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
+	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
+	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
+	 */
+	ext_status = axri->parameter & WCQE_PARAM_MASK;
+	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
+	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
+		lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
+/**
+ * lpfc_sli_async_event_handler - ASYNC iocb handler function
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * This function is called by the slow ring event handler
+ * function when there is an ASYNC event iocb in the ring.
+ * This function is called with no lock held.
+ * Currently this function handles only temperature related
+ * ASYNC events. The function decodes the temperature sensor
+ * event message and posts events for the management applications.
+ **/
+static void
+lpfc_sli_async_event_handler(struct lpfc_hba * phba,
+	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
+{
+	IOCB_t *icmd;
+	uint16_t evt_code;
+	struct temp_event temp_event_data;
+	struct Scsi_Host *shost;
+	uint32_t *iocb_w;
+
+	icmd = &iocbq->iocb;
+	evt_code = icmd->un.asyncstat.evt_code;
+
+	switch (evt_code) {
+	case ASYNC_TEMP_WARN:
+	case ASYNC_TEMP_SAFE:
+		temp_event_data.data = (uint32_t) icmd->ulpContext;
+		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+		if (evt_code == ASYNC_TEMP_WARN) {
+			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+				"0347 Adapter is very hot, please take "
+				"corrective action. temperature : %d Celsius\n",
+				(uint32_t) icmd->ulpContext);
+		} else {
+			temp_event_data.event_code = LPFC_NORMAL_TEMP;
+			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+				"0340 Adapter temperature is OK now. "
+				"temperature : %d Celsius\n",
+				(uint32_t) icmd->ulpContext);
+		}
+
+		/* Send temperature change event to applications */
+		shost = lpfc_shost_from_vport(phba->pport);
+		fc_host_post_vendor_event(shost, fc_get_event_number(),
+			sizeof(temp_event_data), (char *) &temp_event_data,
+			LPFC_NL_VENDOR_ID);
+		break;
+	case ASYNC_STATUS_CN:
+		lpfc_sli_abts_err_handler(phba, iocbq);
+		break;
+	default:
+		iocb_w = (uint32_t *) icmd;
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0346 Ring %d handler: unexpected ASYNC_STATUS"
+			" evt_code 0x%x\n"
+			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
+			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
+			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
+			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
+			pring->ringno, icmd->un.asyncstat.evt_code,
+			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
+			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
+			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
+			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
+
+		break;
+	}
+}
+
+
+/**
+ * lpfc_sli_setup - SLI ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up rings of the SLI interface with
+ * number of iocbs per ring and iotags. This function is
+ * called while the driver attaches to the HBA and before the
+ * interrupts are enabled. So there is no need for locking.
+ *
+ * This function always returns 0.
+ **/
+int
+lpfc_sli_setup(struct lpfc_hba *phba)
+{
+	int i, totiocbsize = 0;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+
+	psli->num_rings = MAX_CONFIGURED_RINGS;
+	psli->sli_flag = 0;
+	psli->fcp_ring = LPFC_FCP_RING;
+	psli->next_ring = LPFC_FCP_NEXT_RING;
+	psli->extra_ring = LPFC_EXTRA_RING;
+
+	psli->iocbq_lookup = NULL;
+	psli->iocbq_lookup_len = 0;
+	psli->last_iotag = 0;
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		switch (i) {
+		case LPFC_FCP_RING:	/* ring 0 - FCP */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
+			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
+			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+			pring->sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->iotag_ctr = 0;
+			pring->iotag_max =
+			    (phba->cfg_hba_queue_depth * 2);
+			pring->fast_iotag = pring->iotag_max;
+			pring->num_mask = 0;
+			break;
+		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
+			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+			pring->sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->iotag_max = phba->cfg_hba_queue_depth;
+			pring->num_mask = 0;
+			break;
+		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
+			/* numCiocb and numRiocb are used in config_port */
+			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
+			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+			pring->sizeCiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_CMD_SIZE :
+							SLI2_IOCB_CMD_SIZE;
+			pring->sizeRiocb = (phba->sli_rev == 3) ?
+							SLI3_IOCB_RSP_SIZE :
+							SLI2_IOCB_RSP_SIZE;
+			pring->fast_iotag = 0;
+			pring->iotag_ctr = 0;
+			pring->iotag_max = 4096;
+			pring->lpfc_sli_rcv_async_status =
+				lpfc_sli_async_event_handler;
+			pring->num_mask = LPFC_MAX_RING_MASK;
+			pring->prt[0].profile = 0;	/* Mask 0 */
+			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+			pring->prt[0].type = FC_TYPE_ELS;
+			pring->prt[0].lpfc_sli_rcv_unsol_event =
+			    lpfc_els_unsol_event;
+			pring->prt[1].profile = 0;	/* Mask 1 */
+			pring->prt[1].rctl = FC_RCTL_ELS_REP;
+			pring->prt[1].type = FC_TYPE_ELS;
+			pring->prt[1].lpfc_sli_rcv_unsol_event =
+			    lpfc_els_unsol_event;
+			pring->prt[2].profile = 0;	/* Mask 2 */
+			/* NameServer Inquiry */
+			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
+			/* NameServer */
+			pring->prt[2].type = FC_TYPE_CT;
+			pring->prt[2].lpfc_sli_rcv_unsol_event =
+			    lpfc_ct_unsol_event;
+			pring->prt[3].profile = 0;	/* Mask 3 */
+			/* NameServer response */
+			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
+			/* NameServer */
+			pring->prt[3].type = FC_TYPE_CT;
+			pring->prt[3].lpfc_sli_rcv_unsol_event =
+			    lpfc_ct_unsol_event;
+			/* abort unsolicited sequence */
+			pring->prt[4].profile = 0;	/* Mask 4 */
+			pring->prt[4].rctl = FC_RCTL_BA_ABTS;
+			pring->prt[4].type = FC_TYPE_BLS;
+			pring->prt[4].lpfc_sli_rcv_unsol_event =
+			    lpfc_sli4_ct_abort_unsol_event;
+			break;
+		}
+		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
+				(pring->numRiocb * pring->sizeRiocb);
+	}
+	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
+		/* Too many cmd / rsp ring entries in SLI2 SLIM */
+		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
+		       "SLI2 SLIM Data: x%x x%lx\n",
+		       phba->brd_no, totiocbsize,
+		       (unsigned long) MAX_SLIM_IOCB_SIZE);
+	}
+	if (phba->cfg_multi_ring_support == 2)
+		lpfc_extra_ring_setup(phba);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_queue_setup - Queue initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held and always returns
+ * 1.
+ **/
+int
+lpfc_sli_queue_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli;
+	struct lpfc_sli_ring *pring;
+	int i;
+
+	psli = &phba->sli;
+	spin_lock_irq(&phba->hbalock);
+	INIT_LIST_HEAD(&psli->mboxq);
+	INIT_LIST_HEAD(&psli->mboxq_cmpl);
+	/* Initialize list headers for txq and txcmplq as double linked lists */
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		pring->ringno = i;
+		pring->next_cmdidx  = 0;
+		pring->local_getidx = 0;
+		pring->cmdidx = 0;
+		INIT_LIST_HEAD(&pring->txq);
+		INIT_LIST_HEAD(&pring->txcmplq);
+		INIT_LIST_HEAD(&pring->iocb_continueq);
+		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
+		INIT_LIST_HEAD(&pring->postbufq);
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return 1;
+}
+
+/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and completed mailbox command queue. It is the caller's responsibility
+ * to make sure that the driver is in the proper state to flush the mailbox
+ * command sub-system. Namely, the posting of mailbox commands into the
+ * pending mailbox command queue from the various clients must be stopped;
+ * either the HBA is in a state in which it will never work on the outstanding
+ * mailbox command (such as in EEH or ERATT conditions) or the outstanding
+ * mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	LPFC_MBOXQ_t *pmb;
+	unsigned long iflag;
+
+	/* Flush all the mailbox commands in the mbox system */
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	/* The pending mailbox command queue */
+	list_splice_init(&phba->sli.mboxq, &completions);
+	/* The outstanding active mailbox command */
+	if (psli->mbox_active) {
+		list_add_tail(&psli->mbox_active->list, &completions);
+		psli->mbox_active = NULL;
+		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	}
+	/* The completed mailbox command queue */
+	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+		if (pmb->mbox_cmpl)
+			pmb->mbox_cmpl(phba, pmb);
+	}
+}
+
+/**
+ * lpfc_sli_host_down - Vport cleanup function
+ * @vport: Pointer to virtual port object.
+ *
+ * lpfc_sli_host_down is called to clean up the resources
+ * associated with a vport before destroying virtual
+ * port data structures.
+ * This function does the following operations:
+ * - Free discovery resources associated with this virtual
+ *   port.
+ * - Free iocbs associated with this virtual port in
+ *   the txq.
+ * - Send abort for all iocb commands associated with this
+ *   vport in txcmplq.
+ *
+ * This function is called with no lock held and always returns 1.
+ **/
+int
+lpfc_sli_host_down(struct lpfc_vport *vport)
+{
+	LIST_HEAD(completions);
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_iocbq *iocb, *next_iocb;
+	int i;
+	unsigned long flags = 0;
+	uint16_t prev_pring_flag;
+
+	lpfc_cleanup_discovery_resources(vport);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		prev_pring_flag = pring->flag;
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
+			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
+		/*
+		 * Error everything on the txq since these iocbs have not been
+		 * given to the FW yet.
+		 */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+			if (iocb->vport != vport)
+				continue;
+			list_move_tail(&iocb->list, &completions);
+			pring->txq_cnt--;
+		}
+
+		/* Next issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+									list) {
+			if (iocb->vport != vport)
+				continue;
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		}
+
+		pring->flag = prev_pring_flag;
+	}
+
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+	return 1;
+}
+
+/**
+ * lpfc_sli_hba_down - Resource cleanup function for the HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all iocbs, buffers and mailbox commands
+ * while shutting down the HBA. This function is called with no
+ * lock held and always returns 1.
+ * This function does the following to cleanup driver resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry
+ *   in the list.
+ * - Free up any buffer posted to the HBA
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli_hba_down(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_dmabuf *buf_ptr;
+	unsigned long flags = 0;
+	int i;
+
+	/* Shutdown the mailbox command sub-system */
+	lpfc_sli_mbox_sys_shutdown(phba);
+
+	lpfc_hba_down_prep(phba);
+
+	lpfc_fabric_abort_hba(phba);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		/* Only slow rings */
+		if (pring->ringno == LPFC_ELS_RING) {
+			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+			/* Set the lpfc data pending flag */
+			set_bit(LPFC_DATA_READY, &phba->data_flags);
+		}
+
+		/*
+		 * Error everything on the txq since these iocbs have not been
+		 * given to the FW yet.
+		 */
+		list_splice_init(&pring->txq, &completions);
+		pring->txq_cnt = 0;
+
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_DOWN);
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_splice_init(&phba->elsbuf, &completions);
+	phba->elsbuf_cnt = 0;
+	phba->elsbuf_prev_cnt = 0;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	while (!list_empty(&completions)) {
+		list_remove_head(&completions, buf_ptr,
+			struct lpfc_dmabuf, list);
+		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+		kfree(buf_ptr);
+	}
+
+	/* Return any active mbox cmds */
+	del_timer_sync(&psli->mbox_tmo);
+
+	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+	return 1;
+}
+
+/**
+ * lpfc_sli_pcimem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (copied as 32-bit words).
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI memory. This function also changes the endianness
+ * of each word if native endianness is different from SLI
+ * endianness. This function can be called with or without
+ * lock.
+ **/
+void
+lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = destp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+		ldata = *src;
+		ldata = le32_to_cpu(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
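+
+/*
+ * Illustrative sketch, not part of the driver: the copy above is done one
+ * 32-bit word at a time with a little-endian swap, so callers pass a byte
+ * count covering whole words, e.g. when copying a mailbox image out of SLIM
+ * (slim_mbox and local_mbox are hypothetical names):
+ *
+ *	lpfc_sli_pcimem_bcopy(slim_mbox, &local_mbox, MAILBOX_CMD_SIZE);
+ */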
+
+
+/**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (copied as 32-bit words).
+ *
+ * This function is used for copying data between a data structure
+ * with big endian representation to local endianness.
+ * This function can be called with or without lock.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = destp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+		ldata = *src;
+		ldata = be32_to_cpu(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
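+
+/*
+ * Illustrative sketch, not part of the driver: unlike the little-endian PCI
+ * copy above, this variant is for data the hardware presents in big-endian
+ * order (be_src and local_copy are hypothetical names; the length is a
+ * multiple of four bytes):
+ *
+ *	lpfc_sli_bemem_bcopy(be_src, &local_copy, sizeof(local_copy));
+ */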
+
+/**
+ * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mp: Pointer to driver buffer object.
+ *
+ * This function is called with no lock held.
+ * It always returns zero after adding the buffer to the postbufq
+ * buffer list.
+ **/
+int
+lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 struct lpfc_dmabuf *mp)
+{
+	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
+	   later */
+	spin_lock_irq(&phba->hbalock);
+	list_add_tail(&mp->list, &pring->postbufq);
+	pring->postbufq_cnt++;
+	spin_unlock_irq(&phba->hbalock);
+	return 0;
+}
+
+/**
+ * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
+ * @phba: Pointer to HBA context object.
+ *
+ * When HBQ is enabled, buffers are searched based on tags. This function
+ * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
+ * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
+ * does not conflict with tags of buffer posted for unsolicited events.
+ * The function returns the allocated tag. The function is called with
+ * no locks held.
+ **/
+uint32_t
+lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
+{
+	spin_lock_irq(&phba->hbalock);
+	phba->buffer_tag_count++;
+	/*
+	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
+	 * a tag assigned by HBQ.
+	 */
+	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
+	spin_unlock_irq(&phba->hbalock);
+	return phba->buffer_tag_count;
+}
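+
+/*
+ * Illustrative sketch, not part of the driver: a caller posting a buffer
+ * with CMD_QUE_XRI64_CX would tag it first and remember the tag in the
+ * dmabuf so the completion path can look it up again (mp is an assumed
+ * struct lpfc_dmabuf):
+ *
+ *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+ *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
+ */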
+
+/**
+ * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: Buffer tag.
+ *
+ * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
+ * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
+ * iocb is posted to the response ring with the tag of the buffer.
+ * This function searches the pring->postbufq list using the tag
+ * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
+ * iocb. If the buffer is found then the lpfc_dmabuf object of the
+ * buffer is returned to the caller, else NULL is returned.
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			uint32_t tag)
+{
+	struct lpfc_dmabuf *mp, *next_mp;
+	struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on tag */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+		if (mp->buffer_tag == tag) {
+			list_del_init(&mp->list);
+			pring->postbufq_cnt--;
+			spin_unlock_irq(&phba->hbalock);
+			return mp;
+		}
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0402 Cannot find virtual addr for buffer tag on "
+			"ring %d Data x%lx x%p x%p x%x\n",
+			pring->ringno, (unsigned long) tag,
+			slp->next, slp->prev, pring->postbufq_cnt);
+
+	return NULL;
+}
+
+/**
+ * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @phys: DMA address of the buffer.
+ *
+ * This function searches the buffer list using the dma_address
+ * of unsolicited event to find the driver's lpfc_dmabuf object
+ * corresponding to the dma_address. The function returns the
+ * lpfc_dmabuf object if a buffer is found else it returns NULL.
+ * This function is called by the ct and els unsolicited event
+ * handlers to get the buffer associated with the unsolicited
+ * event.
+ *
+ * This function is called with no lock held.
+ **/
+struct lpfc_dmabuf *
+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			 dma_addr_t phys)
+{
+	struct lpfc_dmabuf *mp, *next_mp;
+	struct list_head *slp = &pring->postbufq;
+
+	/* Search postbufq, from the beginning, looking for a match on phys */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+		if (mp->phys == phys) {
+			list_del_init(&mp->list);
+			pring->postbufq_cnt--;
+			spin_unlock_irq(&phba->hbalock);
+			return mp;
+		}
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"0410 Cannot find virtual addr for mapped buf on "
+			"ring %d Data x%llx x%p x%p x%x\n",
+			pring->ringno, (unsigned long long)phys,
+			slp->next, slp->prev, pring->postbufq_cnt);
+	return NULL;
+}
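+
+/*
+ * Illustrative sketch, not part of the driver: an unsolicited CT/ELS event
+ * handler typically recovers the posted buffer from the DMA address carried
+ * in the response iocb, roughly (irsp is an assumed IOCB_t response):
+ *
+ *	paddr = getPaddr(irsp->un.cont64[0].addrHigh,
+ *			 irsp->un.cont64[0].addrLow);
+ *	mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
+ *	if (!mp)
+ *		return;
+ */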
+
+/**
+ * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This function is the completion handler for the abort iocbs for
+ * ELS commands. This function is called from the ELS ring event
+ * handler with no lock held. This function frees memory resources
+ * associated with the abort iocb.
+ **/
+static void
+lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+	uint16_t abort_iotag, abort_context;
+	struct lpfc_iocbq *abort_iocb = NULL;
+
+	if (irsp->ulpStatus) {
+
+		/*
+		 * Assume that the port already completed and returned, or
+		 * will return the iocb. Just log the message.
+		 */
+		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
+		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
+
+		spin_lock_irq(&phba->hbalock);
+		if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (abort_iotag != 0 &&
+				abort_iotag <= phba->sli.last_iotag)
+				abort_iocb =
+					phba->sli.iocbq_lookup[abort_iotag];
+		} else
+			/* For sli4 the abort_tag is the XRI,
+			 * so the abort routine puts the iotag  of the iocb
+			 * being aborted in the context field of the abort
+			 * IOCB.
+			 */
+			abort_iocb = phba->sli.iocbq_lookup[abort_context];
+
+		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+				"0327 Cannot abort els iocb %p "
+				"with tag %x context %x, abort status %x, "
+				"abort code %x\n",
+				abort_iocb, abort_iotag, abort_context,
+				irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+		spin_unlock_irq(&phba->hbalock);
+	}
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for ELS commands
+ * which are aborted. The function frees memory resources used for
+ * the aborted ELS commands.
+ **/
+static void
+lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		     struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp = &rspiocb->iocb;
+
+	/* ELS cmd tag <ulpIoTag> completes */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"0139 Ignoring ELS cmd tag x%x completion Data: "
+			"x%x x%x x%x\n",
+			irsp->ulpIoTag, irsp->ulpStatus,
+			irsp->un.ulpWord[4], irsp->ulpTimeout);
+	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+		lpfc_ct_free_iocb(phba, cmdiocb);
+	else
+		lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort
+ * request, this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
+ **/
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct lpfc_iocbq *abtsiocbp;
+	IOCB_t *icmd = NULL;
+	IOCB_t *iabt = NULL;
+	int retval;
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	/* issue ABTS for this IOCB based on iotag */
+	abtsiocbp = __lpfc_sli_get_iocbq(phba);
+	if (abtsiocbp == NULL)
+		return 0;
+
+	/* This signals the response to set the correct status
+	 * before calling the completion handler
+	 */
+	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+	iabt = &abtsiocbp->iocb;
+	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
+	iabt->un.acxri.abortContextTag = icmd->ulpContext;
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+	}
+	else
+		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+	iabt->ulpLe = 1;
+	iabt->ulpClass = icmd->ulpClass;
+
+	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+	if (phba->link_state >= LPFC_LINK_UP)
+		iabt->ulpCommand = CMD_ABORT_XRI_CN;
+	else
+		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+
+	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+			 "0339 Abort xri x%x, original iotag x%x, "
+			 "abort cmd iotag x%x\n",
+			 iabt->un.acxri.abortIoTag,
+			 iabt->un.acxri.abortContextTag,
+			 abtsiocbp->iotag);
+	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
+
+	if (retval)
+		__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+	/*
+	 * Caller to this routine should check for IOCB_ERROR
+	 * and handle it properly.  This routine no longer removes
+	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
+	 */
+	return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the callback function is changed for those commands
+ * so that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	int retval = IOCB_ERROR;
+	IOCB_t *icmd = NULL;
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	/*
+	 * If we're unloading, don't abort iocb on the ELS ring, but change
+	 * the callback so that nothing happens when it finishes.
+	 */
+	if ((vport->load_flag & FC_UNLOADING) &&
+	    (pring->ringno == LPFC_ELS_RING)) {
+		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+		else
+			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+		goto abort_iotag_exit;
+	}
+
+	/* Now, we try to issue the abort to the cmdiocb out */
+	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
+abort_iotag_exit:
+	/*
+	 * Caller to this routine should check for IOCB_ERROR
+	 * and handle it properly.  This routine no longer removes
+	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
+	 */
+	return retval;
+}
+
+/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	if (pring->ringno == LPFC_ELS_RING)
+		lpfc_fabric_abort_hba(phba);
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Take off all the iocbs on txq for cancelling */
+	list_splice_init(&pring->txq, &completions);
+	pring->txq_cnt = 0;
+
+	/* Next issue ABTS for everything on the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+		lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	int i;
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		lpfc_sli_iocb_ring_abort(phba, pring);
+	}
+}
+
+/**
+ * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
+ * @iocbq: Pointer to driver iocb object.
+ * @vport: Pointer to driver virtual port object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
+ *
+ * This function acts as an iocb filter for functions which abort or count
+ * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * 0 if the filtering criteria are met for the given iocb and will return
+ * 1 if the filtering criteria are not met.
+ * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
+ * given iocb is for the SCSI device specified by vport, tgt_id and
+ * lun_id parameter.
+ * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
+ * given iocb is for the SCSI target specified by vport and tgt_id
+ * parameters.
+ * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
+ * given iocb is for the SCSI host associated with the given vport.
+ * This function is called with no locks held.
+ **/
+static int
+lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
+			   uint16_t tgt_id, uint64_t lun_id,
+			   lpfc_ctx_cmd ctx_cmd)
+{
+	struct lpfc_scsi_buf *lpfc_cmd;
+	int rc = 1;
+
+	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
+		return rc;
+
+	if (iocbq->vport != vport)
+		return rc;
+
+	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+
+	if (lpfc_cmd->pCmd == NULL)
+		return rc;
+
+	switch (ctx_cmd) {
+	case LPFC_CTX_LUN:
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
+			rc = 0;
+		break;
+	case LPFC_CTX_TGT:
+		if ((lpfc_cmd->rdata->pnode) &&
+		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
+			rc = 0;
+		break;
+	case LPFC_CTX_HOST:
+		rc = 0;
+		break;
+	default:
+		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
+			__func__, ctx_cmd);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
+ * @vport: Pointer to virtual port.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function returns number of FCP commands pending for the vport.
+ * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
+ * commands pending on the vport associated with SCSI device specified
+ * by tgt_id and lun_id parameters.
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
+ * commands pending on the vport associated with SCSI target specified
+ * by tgt_id parameter.
+ * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
+ * commands pending on the vport.
+ * This function returns the number of iocbs which satisfy the filter.
+ * This function is called without any lock held.
+ **/
+int
+lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
+		  lpfc_ctx_cmd ctx_cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *iocbq;
+	int sum, i;
+
+	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+						ctx_cmd) == 0)
+			sum++;
+	}
+
+	return sum;
+}
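+
+/*
+ * Illustrative sketch, not part of the driver: error handlers can use this
+ * count to poll for outstanding FCP commands draining after a reset, for
+ * example (tgt_id/lun_id and deadline are assumptions for illustration):
+ *
+ *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
+ *	       time_before(jiffies, deadline))
+ *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ */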
+
+/**
+ * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @rspiocb: Pointer to response iocb object.
+ *
+ * This function is called when an aborted FCP iocb completes. This
+ * function is called by the ring event handler with no lock held.
+ * This function frees the iocb.
+ **/
+void
+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3096 ABORT_XRI_CN completing on xri x%x "
+			"original iotag x%x, abort cmd iotag x%x "
+			"status 0x%x, reason 0x%x\n",
+			cmdiocb->iocb.un.acxri.abortContextTag,
+			cmdiocb->iocb.un.acxri.abortIoTag,
+			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+			rspiocb->iocb.un.ulpWord[4]);
+	lpfc_sli_release_iocbq(phba, cmdiocb);
+	return;
+}
+
+/**
+ * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters
+ * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns number of iocbs it failed to abort.
+ * This function is called with no locks held.
+ **/
+int
+lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_iocbq *abtsiocb;
+	IOCB_t *cmd = NULL;
+	int errcnt = 0, ret_val = 0;
+	int i;
+
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+					       abort_cmd) != 0)
+			continue;
+
+		/* issue ABTS for this IOCB based on iotag */
+		abtsiocb = lpfc_sli_get_iocbq(phba);
+		if (abtsiocb == NULL) {
+			errcnt++;
+			continue;
+		}
+
+		cmd = &iocbq->iocb;
+		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+		else
+			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+		abtsiocb->iocb.ulpLe = 1;
+		abtsiocb->iocb.ulpClass = cmd->ulpClass;
+		abtsiocb->vport = phba->pport;
+
+		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
+		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+		if (iocbq->iocb_flag & LPFC_IO_FCP)
+			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+		if (lpfc_is_link_up(phba))
+			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+		else
+			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+		/* Setup callback routine and issue the command. */
+		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+					      abtsiocb, 0);
+		if (ret_val == IOCB_ERROR) {
+			lpfc_sli_release_iocbq(phba, abtsiocb);
+			errcnt++;
+			continue;
+		}
+	}
+
+	return errcnt;
+}
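+
+/*
+ * Illustrative sketch, not part of the driver: a LUN or target reset handler
+ * would typically flush in-flight commands with something like (the ring
+ * choice and the IDs are assumptions for illustration):
+ *
+ *	cnt = lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+ *				  tgt_id, lun_id, LPFC_CTX_LUN);
+ *
+ * The return value only counts the aborts that could not be issued; the
+ * aborted commands themselves still complete asynchronously.
+ */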
+
+/**
+ * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_sli_issue_iocb_wait function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from other thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	wait_queue_head_t *pdone_q;
+	unsigned long iflags;
+	struct lpfc_scsi_buf *lpfc_cmd;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+
+	/* Set the exchange busy flag for task management commands */
+	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+			cur_iocbq);
+		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+	}
+
+	pdone_q = cmdiocbq->context_un.wait_queue;
+	if (pdone_q)
+		wake_up(pdone_q);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+/**
+ * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @flag: Flag to test.
+ *
+ * This routine grabs the hbalock and then tests the iocb_flag to
+ * see if the passed in flag is set.
+ * Returns:
+ * 1 if flag is set.
+ * 0 if flag is not set.
+ **/
+static int
+lpfc_chk_iocb_flg(struct lpfc_hba *phba,
+		 struct lpfc_iocbq *piocbq, uint32_t flag)
+{
+	unsigned long iflags;
+	int ret;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = piocbq->iocb_flag & flag;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+
+}
+
+/**
+ * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to sli ring.
+ * @piocb: Pointer to command iocb.
+ * @prspiocbq: Pointer to response iocb.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the iocb to firmware and waits for the
+ * iocb to complete. If the iocb command is not
+ * completed within timeout seconds, it returns IOCB_TIMEDOUT.
+ * Caller should not free the iocb resources if this function
+ * returns IOCB_TIMEDOUT.
+ * The function waits for the iocb completion using a
+ * non-interruptible wait.
+ * This function will sleep while waiting for iocb completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupt disabled.
+ * This function assumes that the iocb completions occur while
+ * this function sleeps. So, this function cannot be called from
+ * the thread which processes iocb completions for this ring.
+ * This function clears the iocb_flag of the iocb object before
+ * issuing the iocb and the iocb completion handler sets this
+ * flag and wakes this thread when the iocb completes.
+ * The contents of the response iocb will be copied to prspiocbq
+ * by the completion handler when the command completes.
+ * This function returns IOCB_SUCCESS when success.
+ * This function is called with no lock held.
+ **/
+int
+lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
+			 uint32_t ring_number,
+			 struct lpfc_iocbq *piocb,
+			 struct lpfc_iocbq *prspiocbq,
+			 uint32_t timeout)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+	long timeleft, timeout_req = 0;
+	int retval = IOCB_SUCCESS;
+	uint32_t creg_val;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	/*
+	 * If the caller has provided a response iocbq buffer, then context2
+	 * must be NULL; otherwise it is an error.
+	 */
+	if (prspiocbq) {
+		if (piocb->context2)
+			return IOCB_ERROR;
+		piocb->context2 = prspiocbq;
+	}
+
+	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+	piocb->context_un.wait_queue = &done_q;
+	piocb->iocb_flag &= ~LPFC_IO_WAKE;
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
+		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+				     SLI_IOCB_RET_IOCB);
+	if (retval == IOCB_SUCCESS) {
+		timeout_req = timeout * HZ;
+		timeleft = wait_event_timeout(done_q,
+				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
+				timeout_req);
+
+		if (piocb->iocb_flag & LPFC_IO_WAKE) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+					"0331 IOCB wake signaled\n");
+		} else if (timeleft == 0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0338 IOCB wait timeout error - no "
+					"wake response Data x%x\n", timeout);
+			retval = IOCB_TIMEDOUT;
+		} else {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0330 IOCB wake NOT set, "
+					"Data x%x x%lx\n",
+					timeout, (timeleft / jiffies));
+			retval = IOCB_TIMEDOUT;
+		}
+	} else if (retval == IOCB_BUSY) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+		return retval;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"0332 IOCB wait issue failed, Data x%x\n",
+				retval);
+		retval = IOCB_ERROR;
+	}
+
+	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+		if (lpfc_readl(phba->HCregaddr, &creg_val))
+			return IOCB_ERROR;
+		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+		writel(creg_val, phba->HCregaddr);
+		readl(phba->HCregaddr); /* flush */
+	}
+
+	if (prspiocbq)
+		piocb->context2 = NULL;
+
+	piocb->context_un.wait_queue = NULL;
+	piocb->iocb_cmpl = NULL;
+	return retval;
+}
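+
+/*
+ * Illustrative sketch, not part of the driver: management paths issue an
+ * iocb synchronously and read the response out of a second iocbq, for
+ * example (cmdiocbq and rspiocbq assumed to come from lpfc_sli_get_iocbq()):
+ *
+ *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+ *				      rspiocbq, 30);
+ *
+ * On IOCB_TIMEDOUT the caller must not free the iocb resources, since the
+ * completion may still arrive after the wait has given up.
+ */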
+
+/**
+ * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to driver mailbox object.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the mailbox to firmware and waits for the
+ * mailbox command to complete. If the mailbox command is not
+ * completed within timeout seconds, it returns MBX_TIMEOUT.
+ * The function waits for the mailbox completion using an
+ * interruptible wait. If the thread is woken up due to a
+ * signal, MBX_TIMEOUT error is returned to the caller. Caller
+ * should not free the mailbox resources, if this function returns
+ * MBX_TIMEOUT.
+ * This function will sleep while waiting for mailbox completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. Due to the same reason, this function
+ * cannot be called with interrupts disabled.
+ * This function assumes that the mailbox completion occurs while
+ * this function sleeps. So, this function cannot be called from
+ * the worker thread that processes mailbox completions.
+ * This function is called in the context of HBA management
+ * applications.
+ * This function returns MBX_SUCCESS when successful.
+ * This function is called with no lock held.
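+ *
+ * A minimal usage sketch (hypothetical caller; assumes pmboxq came from the
+ * driver's mailbox mempool):
+ *
+ *   rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ *   if (rc != MBX_TIMEOUT)
+ *           mempool_free(pmboxq, phba->mbox_mem_pool);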
+ **/
+int
+lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
+			 uint32_t timeout)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
+	int retval;
+	unsigned long flag;
+
+	/* The caller must leave context1 empty. */
+	if (pmboxq->context1)
+		return MBX_NOT_FINISHED;
+
+	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
+	/* setup context field to pass wait_queue pointer to wake function  */
+	pmboxq->context1 = &done_q;
+
+	/* now issue the command */
+	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
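+		/*
+		 * MBX_BUSY means the command was queued behind the active
+		 * mailbox; it will still be issued and completed, so wait
+		 * for it as well.
+		 */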
+		wait_event_interruptible_timeout(done_q,
+				pmboxq->mbox_flag & LPFC_MBX_WAKE,
+				timeout * HZ);
+
+		spin_lock_irqsave(&phba->hbalock, flag);
+		pmboxq->context1 = NULL;
+		/*
+		 * If the LPFC_MBX_WAKE flag is set, the mailbox has completed;
+		 * otherwise do not free the resources.
+		 */
+		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
+			retval = MBX_SUCCESS;
+			lpfc_sli4_swap_str(phba, pmboxq);
+		} else {
+			retval = MBX_TIMEOUT;
+			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flag);
+	}
+
+	return retval;
+}
+
+/**
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to shut down the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system as blocked to prevent further
+ * asynchronous mailbox commands from being issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to an
+ * HBA error condition such as EEH or ERATT, this routine shall invoke the
+ * mailbox sub-system flush routine to forcefully bring down the mailbox
+ * sub-system. Otherwise, if it is due to a normal condition (such as
+ * offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down the mailbox sub-system.
+ **/
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	unsigned long timeout;
+
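+	/* Default graceful wait deadline: LPFC_MBOX_TMO seconds from now */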
+	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+
+	spin_lock_irq(&phba->hbalock);
+	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+
+	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
+		/* Determine how long we might wait for the active mailbox
+		 * command to be gracefully completed by firmware.
+		 */
+		if (phba->sli.mbox_active)
+			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+						phba->sli.mbox_active) *
+						1000) + jiffies;
+		spin_unlock_irq(&phba->hbalock);
+
+		while (phba->sli.mbox_active) {
+			/* Check active mailbox complete status every 2ms */
+			msleep(2);
+			if (time_after(jiffies, timeout))
+				/* Timeout: let the mailbox flush routine
+				 * forcefully release the active mailbox
+				 * command.
+				 */
+				break;
+		}
+	} else
+		spin_unlock_irq(&phba->hbalock);
+
+	lpfc_sli_mbox_sys_flush(phba);
+}
+
+/**
+ * lpfc_sli_eratt_read - read sli-3 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t ha_copy;
+
+	/* Read chip Host Attention (HA) register */
+	if (lpfc_readl(phba->HAregaddr, &ha_copy))
+		goto unplug_err;
+
+	if (ha_copy & HA_ERATT) {
+		/* Read host status register to retrieve error event */
+		if (lpfc_sli_read_hs(phba))
+			goto unplug_err;
+
+		/* Check if a deferred error condition is active */
+		if ((HS_FFER1 & phba->work_hs) &&
+		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
+			phba->hba_flag |= DEFER_ERATT;
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr);
+		}
+
+		/* Set the driver HA work bitmap */
+		phba->work_ha |= HA_ERATT;
+		/* Indicate polling handles this ERATT */
+		phba->hba_flag |= HBA_ERATT_HANDLED;
+		return 1;
+	}
+	return 0;
+
+unplug_err:
+	/* Set the driver HS work bitmap */
+	phba->work_hs |= UNPLUG_ERR;
+	/* Set the driver HA work bitmap */
+	phba->work_ha |= HA_ERATT;
+	/* Indicate polling handles this ERATT */
+	phba->hba_flag |= HBA_ERATT_HANDLED;
+	return 1;
+}
+
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+	uint32_t uerr_sta_hi, uerr_sta_lo;
+	uint32_t if_type, portsmphr;
+	struct lpfc_register portstat_reg;
+
+	/*
+	 * For now, use the SLI4 device internal unrecoverable error
+	 * registers for error attention. This can be changed later.
+	 */
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
+			&uerr_sta_lo) ||
+			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
+			&uerr_sta_hi)) {
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1423 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"ue_mask_lo_reg=0x%x, "
+					"ue_mask_hi_reg=0x%x\n",
+					uerr_sta_lo, uerr_sta_hi,
+					phba->sli4_hba.ue_mask_lo,
+					phba->sli4_hba.ue_mask_hi);
+			phba->work_status[0] = uerr_sta_lo;
+			phba->work_status[1] = uerr_sta_hi;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+			&portstat_reg.word0) ||
+			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+			&portsmphr)){
+			phba->work_hs |= UNPLUG_ERR;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
+			phba->work_status[0] =
+				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] =
+				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2885 Port Status Event: "
+					"port status reg 0x%x, "
+					"port smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					portstat_reg.word0,
+					portsmphr,
+					phba->work_status[0],
+					phba->work_status[1]);
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2886 HBA Error Attention on unsupported "
+				"if type %d.", if_type);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_check_eratt - check error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+	uint32_t ha_copy;
+
+	/* If somebody is waiting to handle an eratt, don't process it
+	 * here. The brdkill function will do this.
+	 */
+	if (phba->link_flag & LS_IGNORE_ERATT)
+		return 0;
+
+	/* Check if interrupt handler handles this ERATT */
+	spin_lock_irq(&phba->hbalock);
+	if (phba->hba_flag & HBA_ERATT_HANDLED) {
+		/* Interrupt handler has handled ERATT */
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	/*
+	 * If there is deferred error attention, do not check for error
+	 * attention
+	 */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	/* If PCI channel is offline, don't process it */
+	if (unlikely(pci_channel_offline(phba->pcidev))) {
+		spin_unlock_irq(&phba->hbalock);
+		return 0;
+	}
+
+	switch (phba->sli_rev) {
+	case LPFC_SLI_REV2:
+	case LPFC_SLI_REV3:
+		/* Read chip Host Attention (HA) register */
+		ha_copy = lpfc_sli_eratt_read(phba);
+		break;
+	case LPFC_SLI_REV4:
+		/* Read device Unrecoverable Error (UERR) registers */
+		ha_copy = lpfc_sli4_eratt_read(phba);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0299 Invalid SLI revision (%d)\n",
+				phba->sli_rev);
+		ha_copy = 0;
+		break;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether the device or its PCI slot is in a
+ * state in which the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state in
+ * which the interrupt should be handled, otherwise -EIO.
+ */
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+	/* If the pci channel is offline, ignore all the interrupts */
+	if (unlikely(pci_channel_offline(phba->pcidev)))
+		return -EIO;
+
+	/* Update device level interrupt statistics */
+	phba->sli.slistat.sli_intr++;
+
+	/* Ignore all interrupts during initialization. */
+	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	uint32_t ha_copy, hc_copy;
+	uint32_t work_ha_copy;
+	unsigned long status;
+	unsigned long iflag;
+	uint32_t control;
+
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	/*
+	 * Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Extra work needs to be attended to when this function is invoked as
+	 * an individual interrupt handler in MSI-X multi-message interrupt mode
+	 */
+	if (phba->intr_type == MSIX) {
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
+			return IRQ_NONE;
+		/* Need to read HA REG for slow-path events */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			goto unplug_error;
+		/* If somebody is waiting to handle an eratt don't process it
+		 * here. The brdkill function will do this.
+		 */
+		if (phba->link_flag & LS_IGNORE_ERATT)
+			ha_copy &= ~HA_ERATT;
+		/* Check the need for handling ERATT in interrupt handler */
+		if (ha_copy & HA_ERATT) {
+			if (phba->hba_flag & HBA_ERATT_HANDLED)
+				/* ERATT polling has handled ERATT */
+				ha_copy &= ~HA_ERATT;
+			else
+				/* Indicate interrupt handler handles ERATT */
+				phba->hba_flag |= HBA_ERATT_HANDLED;
+		}
+
+		/*
+		 * If there is deferred error attention, do not check for any
+		 * interrupt.
+		 */
+		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return IRQ_NONE;
+		}
+
+		/* Clear up only attention source related to slow-path */
+		if (lpfc_readl(phba->HCregaddr, &hc_copy))
+			goto unplug_error;
+
+		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
+			HC_LAINT_ENA | HC_ERINT_ENA),
+			phba->HCregaddr);
+		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
+			phba->HAregaddr);
+		writel(hc_copy, phba->HCregaddr);
+		readl(phba->HAregaddr); /* flush */
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	} else
+		ha_copy = phba->ha_copy;
+
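+	/* Only look at the attention bits this slow-path handler is responsible for */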
+	work_ha_copy = ha_copy & phba->work_ha_mask;
+
+	if (work_ha_copy) {
+		if (work_ha_copy & HA_LATT) {
+			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
+				/*
+				 * Turn off Link Attention interrupts
+				 * until CLEAR_LA done
+				 */
+				spin_lock_irqsave(&phba->hbalock, iflag);
+				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
+				control &= ~HC_LAINT_ENA;
+				writel(control, phba->HCregaddr);
+				readl(phba->HCregaddr); /* flush */
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+			}
+			else
+				work_ha_copy &= ~HA_LATT;
+		}
+
+		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
+			/*
+			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
+			 * the only slow ring.
+			 */
+			status = (work_ha_copy &
+				(HA_RXMASK  << (4*LPFC_ELS_RING)));
+			status >>= (4*LPFC_ELS_RING);
+			if (status & HA_RXMASK) {
+				spin_lock_irqsave(&phba->hbalock, iflag);
+				if (lpfc_readl(phba->HCregaddr, &control))
+					goto unplug_error;
+
+				lpfc_debugfs_slow_ring_trc(phba,
+				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
+				control, status,
+				(uint32_t)phba->sli.slistat.sli_intr);
+
+				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
+					lpfc_debugfs_slow_ring_trc(phba,
+						"ISR Disable ring:"
+						"pwork:x%x hawork:x%x wait:x%x",
+						phba->work_ha, work_ha_copy,
+						(uint32_t)((unsigned long)
+						&phba->work_waitq));
+
+					control &=
+					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
+					writel(control, phba->HCregaddr);
+					readl(phba->HCregaddr); /* flush */
+				}
+				else {
+					lpfc_debugfs_slow_ring_trc(phba,
+						"ISR slow ring:   pwork:"
+						"x%x hawork:x%x wait:x%x",
+						phba->work_ha, work_ha_copy,
+						(uint32_t)((unsigned long)
+						&phba->work_waitq));
+				}
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+			}
+		}
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (work_ha_copy & HA_ERATT) {
+			if (lpfc_sli_read_hs(phba))
+				goto unplug_error;
+			/*
+			 * Check if a deferred error condition is
+			 * active
+			 */
+			if ((HS_FFER1 & phba->work_hs) &&
+				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
+				  phba->work_hs)) {
+				phba->hba_flag |= DEFER_ERATT;
+				/* Clear all interrupt enable conditions */
+				writel(0, phba->HCregaddr);
+				readl(phba->HCregaddr);
+			}
+		}
+
+		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
+			pmb = phba->sli.mbox_active;
+			pmbox = &pmb->u.mb;
+			mbox = phba->mbox;
+			vport = pmb->vport;
+
+			/* First check out the status word */
+			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
+			if (pmbox->mbxOwner != OWN_HOST) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				/*
+				 * Stray Mailbox Interrupt, mbxCommand <cmd>
+				 * mbxStatus <status>
+				 */
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI,
+						"(%d):0304 Stray Mailbox "
+						"Interrupt mbxCommand x%x "
+						"mbxStatus x%x\n",
+						(vport ? vport->vpi : 0),
+						pmbox->mbxCommand,
+						pmbox->mbxStatus);
+				/* clear mailbox attention bit */
+				work_ha_copy &= ~HA_MBATT;
+			} else {
+				phba->sli.mbox_active = NULL;
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				phba->last_completion_time = jiffies;
+				del_timer(&phba->sli.mbox_tmo);
+				if (pmb->mbox_cmpl) {
+					lpfc_sli_pcimem_bcopy(mbox, pmbox,
+							MAILBOX_CMD_SIZE);
+					if (pmb->out_ext_byte_len &&
+						pmb->context2)
+						lpfc_sli_pcimem_bcopy(
+						phba->mbox_ext,
+						pmb->context2,
+						pmb->out_ext_byte_len);
+				}
+				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+					lpfc_debugfs_disc_trc(vport,
+						LPFC_DISC_TRC_MBOX_VPORT,
+						"MBOX dflt rpi: : "
+						"status:x%x rpi:x%x",
+						(uint32_t)pmbox->mbxStatus,
+						pmbox->un.varWords[0], 0);
+
+					if (!pmbox->mbxStatus) {
+						mp = (struct lpfc_dmabuf *)
+							(pmb->context1);
+						ndlp = (struct lpfc_nodelist *)
+							pmb->context2;
+
+						/* Reg_LOGIN of dflt RPI was
+						 * successful. Now let's get
+						 * rid of the RPI using the
+						 * same mbox buffer.
+						 */
+						lpfc_unreg_login(phba,
+							vport->vpi,
+							pmbox->un.varWords[0],
+							pmb);
+						pmb->mbox_cmpl =
+							lpfc_mbx_cmpl_dflt_rpi;
+						pmb->context1 = mp;
+						pmb->context2 = ndlp;
+						pmb->vport = vport;
+						rc = lpfc_sli_issue_mbox(phba,
+								pmb,
+								MBX_NOWAIT);
+						if (rc != MBX_BUSY)
+							lpfc_printf_log(phba,
+							KERN_ERR,
+							LOG_MBOX | LOG_SLI,
+							"0350 rc should have "
+							"been MBX_BUSY\n");
+						if (rc != MBX_NOT_FINISHED)
+							goto send_current_mbox;
+					}
+				}
+				spin_lock_irqsave(
+						&phba->pport->work_port_lock,
+						iflag);
+				phba->pport->work_port_events &=
+					~WORKER_MBOX_TMO;
+				spin_unlock_irqrestore(
+						&phba->pport->work_port_lock,
+						iflag);
+				lpfc_mbox_cmpl_put(phba, pmb);
+			}
+		} else
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+		if ((work_ha_copy & HA_MBATT) &&
+		    (phba->sli.mbox_active == NULL)) {
+send_current_mbox:
+			/* Process next mailbox command if there is one */
+			do {
+				rc = lpfc_sli_issue_mbox(phba, NULL,
+							 MBX_NOWAIT);
+			} while (rc == MBX_NOT_FINISHED);
+			if (rc != MBX_SUCCESS)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0349 rc should be "
+						"MBX_SUCCESS\n");
+		}
+
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		phba->work_ha |= work_ha_copy;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		lpfc_worker_wake_up(phba);
+	}
+	return IRQ_HANDLED;
+unplug_error:
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return IRQ_HANDLED;
+
+} /* lpfc_sli_sp_intr_handler */
+
+/**
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	uint32_t ha_copy;
+	unsigned long status;
+	unsigned long iflag;
+
+	/* Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *) dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Extra work needs to be attended to when this function is invoked as
+	 * an individual interrupt handler in MSI-X multi-message interrupt mode
+	 */
+	if (phba->intr_type == MSIX) {
+		/* Check device state for handling interrupt */
+		if (lpfc_intr_state_check(phba))
+			return IRQ_NONE;
+		/* Need to read HA REG for FCP ring and other ring events */
+		if (lpfc_readl(phba->HAregaddr, &ha_copy))
+			return IRQ_HANDLED;
+		/* Clear up only attention source related to fast-path */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		/*
+		 * If there is deferred error attention, do not check for
+		 * any interrupt.
+		 */
+		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			return IRQ_NONE;
+		}
+		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
+			phba->HAregaddr);
+		readl(phba->HAregaddr); /* flush */
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+	} else
+		ha_copy = phba->ha_copy;
+
+	/*
+	 * Process all events on FCP ring. Take the optimized path for FCP IO.
+	 */
+	ha_copy &= ~(phba->work_ha_mask);
+
+	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+	status >>= (4*LPFC_FCP_RING);
+	if (status & HA_RXMASK)
+		lpfc_sli_handle_fast_ring_event(phba,
+						&phba->sli.ring[LPFC_FCP_RING],
+						status);
+
+	if (phba->cfg_multi_ring_support == 2) {
+		/*
+		 * Process all events on extra ring. Take the optimized path
+		 * for extra ring IO.
+		 */
+		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+		status >>= (4*LPFC_EXTRA_RING);
+		if (status & HA_RXMASK) {
+			lpfc_sli_handle_fast_ring_event(phba,
+					&phba->sli.ring[LPFC_EXTRA_RING],
+					status);
+		}
+	}
+	return IRQ_HANDLED;
+}  /* lpfc_sli_fp_intr_handler */
+
+/**
+ * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the HBA device-level interrupt handler to device with
+ * SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	irqreturn_t sp_irq_rc, fp_irq_rc;
+	unsigned long status1, status2;
+	uint32_t hc_copy;
+
+	/*
+	 * Get the driver's phba structure from the dev_id and
+	 * assume the HBA is not interrupting.
+	 */
+	phba = (struct lpfc_hba *) dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (lpfc_intr_state_check(phba))
+		return IRQ_NONE;
+
+	spin_lock(&phba->hbalock);
+	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
+
+	if (unlikely(!phba->ha_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_NONE;
+	} else if (phba->ha_copy & HA_ERATT) {
+		if (phba->hba_flag & HBA_ERATT_HANDLED)
+			/* ERATT polling has handled ERATT */
+			phba->ha_copy &= ~HA_ERATT;
+		else
+			/* Indicate interrupt handler handles ERATT */
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+	}
+
+	/*
+	 * If there is deferred error attention, do not check for any interrupt.
+	 */
+	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_NONE;
+	}
+
+	/* Clear attention sources except link and error attentions */
+	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
+		spin_unlock(&phba->hbalock);
+		return IRQ_HANDLED;
+	}
+	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
+		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
+		phba->HCregaddr);
+	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+	writel(hc_copy, phba->HCregaddr);
+	readl(phba->HAregaddr); /* flush */
+	spin_unlock(&phba->hbalock);
+
+	/*
+	 * Invoke slow-path host attention interrupt handling as appropriate.
+	 */
+
+	/* status of events with mailbox and link attention */
+	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
+
+	/* status of events with ELS ring */
+	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
+	status2 >>= (4*LPFC_ELS_RING);
+
+	if (status1 || (status2 & HA_RXMASK))
+		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
+	else
+		sp_irq_rc = IRQ_NONE;
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+
+	/* status of events with FCP ring */
+	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+	status1 >>= (4*LPFC_FCP_RING);
+
+	/* status of events with extra ring */
+	if (phba->cfg_multi_ring_support == 2) {
+		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+		status2 >>= (4*LPFC_EXTRA_RING);
+	} else
+		status2 = 0;
+
+	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
+		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
+	else
+		fp_irq_rc = IRQ_NONE;
+
+	/* Return device-level interrupt handling status */
+	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
+}  /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the fcp xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the fcp xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for FCP work queue */
+		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the els xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the els xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for ELS work queue */
+		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the complete wcqe
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from the command iocbq and transferring the
+ * completion status information from the completion wcqe.
+ **/
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+			      struct lpfc_iocbq *pIocbIn,
+			      struct lpfc_iocbq *pIocbOut,
+			      struct lpfc_wcqe_complete *wcqe)
+{
+	unsigned long iflags;
+	uint32_t status;
+	size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
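+	/*
+	 * Copy everything from the 'iocb' member to the end of the structure;
+	 * the fields before it (list linkage, tags) in the response iocbq are
+	 * left untouched.
+	 */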
+	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+	       sizeof(struct lpfc_iocbq) - offset);
+	/* Map WCQE parameters into irspiocb parameters */
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
+	if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
+		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+			pIocbIn->iocb.un.fcpi.fcpi_parm =
+					pIocbOut->iocb.un.fcpi.fcpi_parm -
+					wcqe->total_data_placed;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+	} else {
+		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+	}
+
+	/* Convert BG errors for completion status */
+	if (status == CQE_STATUS_DI_ERROR) {
+		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_GUARD_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_APPTAG_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_REFTAG_ERR_MASK;
+
+		/* Check to see if there was any good data before the error */
+		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_HI_WATER_MARK_PRESENT_MASK;
+			pIocbIn->iocb.unsli3.sli3_bg.bghm =
+				wcqe->total_data_placed;
+		}
+
+		/*
+		 * Set ALL the error bits to indicate we don't know what
+		 * type of error it is.
+		 */
+		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+				BGS_GUARD_ERR_MASK);
+	}
+
+	/* Pick up HBA exchange busy condition */
+	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+}
+
+/**
+ * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * @phba: Pointer to HBA context object.
+ * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
+ *
+ * This routine handles an ELS work-queue completion event and constructs
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
+ * discovery engine to handle.
+ *
+ * Return: Pointer to the response IOCBQ on success, NULL otherwise.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
+			       struct lpfc_iocbq *irspiocbq)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_wcqe_complete *wcqe;
+	unsigned long iflags;
+
+	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	/* Look up the ELS command IOCB and create pseudo response IOCB */
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0386 ELS complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		lpfc_sli_release_iocbq(phba, irspiocbq);
+		return NULL;
+	}
+
+	/* Fake the irspiocbq and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+
+	return irspiocbq;
+}
+
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying an
+ * asynchronous event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"0392 Async Event: word0:x%x, word1:x%x, "
+			"word2:x%x, word3:x%x\n", mcqe->word0,
+			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0394 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into an asynchronous event entry */
+	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+	/* Set the async event flag */
+	phba->hba_flag |= ASYNC_EVENT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry carrying a
+ * mailbox completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+	uint32_t mcqe_status;
+	MAILBOX_t *mbox, *pmbox;
+	struct lpfc_mqe *mqe;
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_dmabuf *mp;
+	unsigned long iflags;
+	LPFC_MBOXQ_t *pmb;
+	bool workposted = false;
+	int rc;
+
+	/* Not a mailbox completion MCQE; bail out after the consumed check */
+	if (!bf_get(lpfc_trailer_completed, mcqe))
+		goto out_no_mqe_complete;
+
+	/* Get the reference to the active mbox command */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pmb = phba->sli.mbox_active;
+	if (unlikely(!pmb)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"1832 No pending MBOX command to handle\n");
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		goto out_no_mqe_complete;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	mqe = &pmb->u.mqe;
+	pmbox = (MAILBOX_t *)&pmb->u.mqe;
+	mbox = phba->mbox;
+	vport = pmb->vport;
+
+	/* Reset heartbeat timer */
+	phba->last_completion_time = jiffies;
+	del_timer(&phba->sli.mbox_tmo);
+
+	/* Move mbox data to caller's mailbox region, do endian swapping */
+	if (pmb->mbox_cmpl && mbox)
+		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+
+	/*
+	 * For mcqe errors, conditionally move a modified error code to
+	 * the mbox so that the error will not be missed.
+	 */
+	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
+			bf_set(lpfc_mqe_status, mqe,
+			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
+	}
+	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+				      "MBOX dflt rpi: status:x%x rpi:x%x",
+				      mcqe_status,
+				      pmbox->un.varWords[0], 0);
+		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+			mp = (struct lpfc_dmabuf *)(pmb->context1);
+			ndlp = (struct lpfc_nodelist *)pmb->context2;
+			/* Reg_LOGIN of dflt RPI was successful. Now let's get
+			 * rid of the RPI using the same mbox buffer.
+			 */
+			lpfc_unreg_login(phba, vport->vpi,
+					 pmbox->un.varWords[0], pmb);
+			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+			pmb->context1 = mp;
+			pmb->context2 = ndlp;
+			pmb->vport = vport;
+			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+			if (rc != MBX_BUSY)
+				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+						LOG_SLI, "0385 rc should "
+						"have been MBX_BUSY\n");
+			if (rc != MBX_NOT_FINISHED)
+				goto send_current_mbox;
+		}
+	}
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+	/* There is mailbox completion work to do */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_mbox_cmpl_put(phba, pmb);
+	phba->work_ha |= HA_MBATT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	workposted = true;
+
+send_current_mbox:
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	/* Release the mailbox command posting token */
+	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+	/* Setting the active mailbox pointer must be in sync with the flag clear */
+	phba->sli.mbox_active = NULL;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	/* Wake up worker thread to post the next pending mailbox command */
+	lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+	if (bf_get(lpfc_trailer_consumed, mcqe))
+		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry. It invokes the
+ * proper mailbox completion handling or asynchronous event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+	struct lpfc_mcqe mcqe;
+	bool workposted;
+
+	/* Copy the mailbox MCQE and convert endian order as needed */
+	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+	/* Invoke the proper event handling routine */
+	if (!bf_get(lpfc_trailer_async, &mcqe))
+		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+	else
+		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_iocbq *irspiocbq;
+	unsigned long iflags;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+
+	/* Get an irspiocbq for later ELS response processing use */
+	irspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!irspiocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+			pring->txq_cnt, phba->iocb_cnt,
+			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
+			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+		return false;
+	}
+
+	/* Save off the slow-path queue event for work thread to process */
+	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_add_tail(&irspiocbq->cq_event.list,
+		      &phba->sli4_hba.sp_queue_event);
+	phba->hba_flag |= HBA_SP_QUEUE_EVT;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine for the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	/* sanity check on queue memory */
+	if (unlikely(!phba->sli4_hba.els_wq))
+		return;
+	/* Check for the slow-path ELS work queue */
+	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+	else
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2579 Slow-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+				phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+				   struct lpfc_queue *cq,
+				   struct sli4_wcqe_xri_aborted *wcqe)
+{
+	bool workposted = false;
+	struct lpfc_cq_event *cq_event;
+	unsigned long iflags;
+
+	/* Allocate a new internal CQ_EVENT entry */
+	cq_event = lpfc_sli4_cq_event_alloc(phba);
+	if (!cq_event) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0602 Failed to allocate CQ_EVENT entry\n");
+		return false;
+	}
+
+	/* Move the CQE into the proper xri abort event list */
+	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+	switch (cq->subtype) {
+	case LPFC_FCP:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+		/* Set the fcp xri abort event flag */
+		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case LPFC_ELS:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+		/* Set the els xri abort event flag */
+		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0603 Invalid work queue CQE subtype (x%x)\n",
+				cq->subtype);
+		workposted = false;
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @rcqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
+{
+	bool workposted = false;
+	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+	struct hbq_dmabuf *dma_buf;
+	uint32_t status, rq_id;
+	unsigned long iflags;
+
+	/* sanity check on queue memory */
+	if (unlikely(!hrq) || unlikely(!drq))
+		return workposted;
+
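+	/* The RQ id lives in a different field for RECEIVE vs RECEIVE_V1 CQEs */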
+	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+	else
+		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+	if (rq_id != hrq->queue_id)
+		goto out;
+
+	status = bf_get(lpfc_rcqe_status, rcqe);
+	switch (status) {
+	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2537 Receive Frame Truncated!!\n");
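+		/* Fall through: hand the truncated frame to the normal
+		 * receive processing below.
+		 */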
+	case FC_STATUS_RQ_SUCCESS:
+		lpfc_sli4_rq_release(hrq, drq);
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+		if (!dma_buf) {
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			goto out;
+		}
+		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+		/* save off the frame for the worker thread to process */
+		list_add_tail(&dma_buf->cq_event.list,
+			      &phba->sli4_hba.sp_queue_event);
+		/* Frame received */
+		phba->hba_flag |= HBA_SP_QUEUE_EVT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	case FC_STATUS_INSUFF_BUF_NEED_BUF:
+	case FC_STATUS_INSUFF_BUF_FRM_DISC:
+		/* Post more buffers if possible */
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
+	}
+out:
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue or receive-queue completion
+ * queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_cqe cqevt;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_cqe_code, &cqevt)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ/RQ complete event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&cqevt);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_sp_handle_rel_wcqe(phba,
+				(struct lpfc_wcqe_release *)&cqevt);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&cqevt);
+		break;
+	case CQE_CODE_RECEIVE:
+	case CQE_CODE_RECEIVE_V1:
+		/* Process the RQ event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_rcqe(phba,
+				(struct lpfc_rcqe *)&cqevt);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0388 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_cqe_code, &cqevt));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event for a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it looks up the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+	struct lpfc_queue *cq = NULL, *childq, *speq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	int ecount = 0;
+	uint16_t cqid;
+
+	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0359 Not a valid slow-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+
+	/* Search for completion queue pointer matching this cqid */
+	speq = phba->sli4_hba.sp_eq;
+	/* sanity check on queue memory */
+	if (unlikely(!speq))
+		return;
+	list_for_each_entry(childq, &speq->child_list, list) {
+		if (childq->queue_id == cqid) {
+			cq = childq;
+			break;
+		}
+	}
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0365 Slow-path CQ identifier "
+					"(%d) does not exist\n", cqid);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	switch (cq->type) {
+	case LPFC_MCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
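+			/* Periodically release consumed CQEs (without
+			 * re-arming) so the firmware can reuse the slots.
+			 */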
+			if (!(++ecount % cq->entry_repost))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	case LPFC_WCQ:
+		while ((cqe = lpfc_sli4_cq_get(cq))) {
+			if (cq->subtype == LPFC_FCP)
+				workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+								       cqe);
+			else
+				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
+								      cqe);
+			if (!(++ecount % cq->entry_repost))
+				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+		}
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0370 Invalid completion queue type (%d)\n",
+				cq->type);
+		return;
+	}
+
+	/* Catch the no cq entry condition, log an error */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0371 No entry from the CQ: identifier "
+				"(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there is work to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for an FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+			     struct lpfc_wcqe_complete *wcqe)
+{
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq irspiocbq;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	pring->stats.iocb_event++;
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+	/* Check for response status */
+	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+		/* If resource errors reported from HBA, reduce queue
+		 * depth of the SCSI device.
+		 */
+		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
+		     IOSTAT_LOCAL_REJECT) &&
+		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
+			phba->lpfc_rampdown_queue_depth(phba);
+		}
+		/* Log the error status */
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0373 FCP complete error: status=x%x, "
+				"hw_status=x%x, total_data_specified=%d, "
+				"parameter=x%x, word3=x%x\n",
+				bf_get(lpfc_wcqe_c_status, wcqe),
+				bf_get(lpfc_wcqe_c_hw_status, wcqe),
+				wcqe->total_data_placed, wcqe->parameter,
+				wcqe->word3);
+	}
+
+	/* Look up the FCP command IOCB and create pseudo response IOCB */
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (unlikely(!cmdiocbq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0374 FCP complete with no corresponding "
+				"cmdiocb: iotag (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+	if (unlikely(!cmdiocbq->iocb_cmpl)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"0375 FCP cmdiocb not callback function "
+				"iotag: (%d)\n",
+				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+		return;
+	}
+
+	/* Fake the irspiocb and copy necessary response information */
+	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+
+	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
+	/* Pass the cmd_iocb and the rsp state to the upper layer */
+	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine for the fast-path FCP WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			     struct lpfc_wcqe_release *wcqe)
+{
+	struct lpfc_queue *childwq;
+	bool wqid_matched = false;
+	uint16_t fcp_wqid;
+
+	/* Check for fast-path FCP work queue release */
+	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+	list_for_each_entry(childwq, &cq->child_list, list) {
+		if (childwq->queue_id == fcp_wqid) {
+			lpfc_sli4_wq_release(childwq,
+					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+			wqid_matched = true;
+			break;
+		}
+	}
+	/* Report warning log message if no match found */
+	if (!wqid_matched)
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"2580 Fast-path wqe consume event carries "
+				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for an FCP command response completion.
+ **/
+static int
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+			 struct lpfc_cqe *cqe)
+{
+	struct lpfc_wcqe_release wcqe;
+	bool workposted = false;
+
+	/* Copy the work queue CQE and convert endian order if needed */
+	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+	/* Check and process for different type of WCQE and dispatch */
+	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+	case CQE_CODE_COMPL_WQE:
+		/* Process the WQ complete event */
+		phba->last_completion_time = jiffies;
+		lpfc_sli4_fp_handle_fcp_wcqe(phba,
+				(struct lpfc_wcqe_complete *)&wcqe);
+		break;
+	case CQE_CODE_RELEASE_WQE:
+		/* Process the WQ release event */
+		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+				(struct lpfc_wcqe_release *)&wcqe);
+		break;
+	case CQE_CODE_XRI_ABORTED:
+		/* Process the WQ XRI abort event */
+		phba->last_completion_time = jiffies;
+		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+				(struct sli4_wcqe_xri_aborted *)&wcqe);
+		break;
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0144 Not a valid WCQE code: x%x\n",
+				bf_get(lpfc_wcqe_c_code, &wcqe));
+		break;
+	}
+	return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ * @fcp_cqidx: Index of the fast-path FCP completion queue to process.
+ *
+ * This routine processes an event queue entry from the fast-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event for a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it looks up the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+			uint32_t fcp_cqidx)
+{
+	struct lpfc_queue *cq;
+	struct lpfc_cqe *cqe;
+	bool workposted = false;
+	uint16_t cqid;
+	int ecount = 0;
+
+	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0366 Not a valid fast-path completion "
+				"event: majorcode=x%x, minorcode=x%x\n",
+				bf_get_le32(lpfc_eqe_major_code, eqe),
+				bf_get_le32(lpfc_eqe_minor_code, eqe));
+		return;
+	}
+
+	if (unlikely(!phba->sli4_hba.fcp_cq)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+				"3146 Fast-path completion queues "
+				"does not exist\n");
+		return;
+	}
+	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+	if (unlikely(!cq)) {
+		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"0367 Fast-path completion queue "
+					"(%d) does not exist\n", fcp_cqidx);
+		return;
+	}
+
+	/* Get the reference to the corresponding CQ */
+	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+	if (unlikely(cqid != cq->queue_id)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0368 Miss-matched fast-path completion "
+				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
+				cqid, cq->queue_id);
+		return;
+	}
+
+	/* Process all the entries to the CQ */
+	while ((cqe = lpfc_sli4_cq_get(cq))) {
+		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+		if (!(++ecount % cq->entry_repost))
+			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Catch the no cq entry condition */
+	if (unlikely(ecount == 0))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0369 No entry from fast-path completion "
+				"queue fcpcqid=%d\n", cq->queue_id);
+
+	/* In any case, flush and re-arm the CQ */
+	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+	/* wake up worker thread if there is work to be done */
+	if (workposted)
+		lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	struct lpfc_eqe *eqe;
+
+	/* walk all the EQ entries and drop on the floor */
+	while ((eqe = lpfc_sli4_eq_get(eq)))
+		;
+
+	/* Clear and re-arm the EQ */
+	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA is
+ * undergoing initialization, the interrupt handler will not process the
+ * interrupt. The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_queue *speq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+
+	/*
+	 * Get the driver's phba structure from the dev_id
+	 */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	speq = phba->sli4_hba.sp_eq;
+	if (unlikely(!speq))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, speq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the event on FCP slow-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(speq))) {
+		lpfc_sli4_sp_handle_eqe(phba, eqe);
+		if (!(++ecount % speq->entry_repost))
+			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the slow-path EQ */
+	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
+
+	/* Catch the case of no EQ entries being processed */
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X vector is not shared, so no EQE is unexpected */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0357 MSI-X interrupt with no EQE\n");
+		else
+			/* Shared MSI/INTx line: the interrupt was not ours */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_sp_intr_handler */
+
+/**
+ * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is equal
+ * to the corresponding FCP CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba *phba;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+	struct lpfc_queue *fpeq;
+	struct lpfc_eqe *eqe;
+	unsigned long iflag;
+	int ecount = 0;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+	phba = fcp_eq_hdl->phba;
+	fcp_eqidx = fcp_eq_hdl->idx;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+	if (unlikely(!phba->sli4_hba.fp_eq))
+		return IRQ_NONE;
+
+	/* Get to the EQ struct associated with this vector */
+	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+	if (unlikely(!fpeq))
+		return IRQ_NONE;
+
+	/* Check device state for handling interrupt */
+	if (unlikely(lpfc_intr_state_check(phba))) {
+		/* Check again for link_state with lock held */
+		spin_lock_irqsave(&phba->hbalock, iflag);
+		if (phba->link_state < LPFC_LINK_DOWN)
+			/* Flush, clear interrupt, and rearm the EQ */
+			lpfc_sli4_eq_flush(phba, fpeq);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Process all the event on FCP fast-path EQ
+	 */
+	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+		if (!(++ecount % fpeq->entry_repost))
+			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+	}
+
+	/* Always clear and re-arm the fast-path EQ */
+	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+	if (unlikely(ecount == 0)) {
+		if (phba->intr_type == MSIX)
+			/* MSI-X vector is not shared, so no EQE is unexpected */
+			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+					"0358 MSI-X interrupt with no EQE\n");
+		else
+			/* Shared MSI/INTx line: the interrupt was not ours */
+			return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+	struct lpfc_hba  *phba;
+	irqreturn_t sp_irq_rc, fp_irq_rc;
+	bool fp_handled = false;
+	uint32_t fcp_eqidx;
+
+	/* Get the driver's phba structure from the dev_id */
+	phba = (struct lpfc_hba *)dev_id;
+
+	if (unlikely(!phba))
+		return IRQ_NONE;
+
+	/*
+	 * Invokes slow-path host attention interrupt handling as appropriate.
+	 */
+	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
+
+	/*
+	 * Invoke fast-path host attention interrupt handling as appropriate.
+	 */
+	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+		if (fp_irq_rc == IRQ_HANDLED)
+			fp_handled |= true;
+	}
+
+	return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+} /* lpfc_sli4_intr_handler */
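+
+/*
+ * Illustrative sketch, not part of the driver: one way the three handlers
+ * above can be wired up. With MSI-X, one vector is given to the slow-path
+ * handler and one vector per fast-path EQ to the fast-path handler; with
+ * MSI or INTx the single device-level handler is registered instead. The
+ * example helper, its vector arguments and the irq names are assumptions
+ * for illustration only; error unwinding of earlier request_irq() calls is
+ * omitted for brevity.
+ */
+#if 0
+static int lpfc_sli4_example_register_irqs(struct lpfc_hba *phba,
+					   unsigned int sp_vector,
+					   unsigned int *fp_vectors)
+{
+	uint32_t idx;
+	int rc;
+
+	/* MSI-X: slow-path events get their own vector */
+	rc = request_irq(sp_vector, lpfc_sli4_sp_intr_handler, 0,
+			 "lpfc:sp", phba);
+	if (rc)
+		return rc;
+
+	/* MSI-X: one vector per fast-path EQ, passing the matching EQ handle */
+	for (idx = 0; idx < phba->cfg_fcp_eq_count; idx++) {
+		rc = request_irq(fp_vectors[idx], lpfc_sli4_fp_intr_handler, 0,
+				 "lpfc:fp", &phba->sli4_hba.fcp_eq_hdl[idx]);
+		if (rc)
+			return rc;
+	}
+	return 0;
+
+	/*
+	 * MSI or INTx would instead register the device-level handler:
+	 *
+	 *	request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+	 *		    IRQF_SHARED, "lpfc", phba);
+	 */
+}
+#endif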
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+	struct lpfc_dmabuf *dmabuf;
+
+	if (!queue)
+		return;
+
+	while (!list_empty(&queue->page_list)) {
+		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+				 list);
+		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+				  dmabuf->virt, dmabuf->phys);
+		kfree(dmabuf);
+	}
+	kfree(queue);
+	return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry_count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+		      uint32_t entry_count)
+{
+	struct lpfc_queue *queue;
+	struct lpfc_dmabuf *dmabuf;
+	int x, total_qe_count;
+	void *dma_pointer;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	queue = kzalloc(sizeof(struct lpfc_queue) +
+			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	queue->page_count = (ALIGN(entry_size * entry_count,
+			hw_page_size))/hw_page_size;
+	INIT_LIST_HEAD(&queue->list);
+	INIT_LIST_HEAD(&queue->page_list);
+	INIT_LIST_HEAD(&queue->child_list);
+	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!dmabuf)
+			goto out_fail;
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  hw_page_size, &dmabuf->phys,
+						  GFP_KERNEL);
+		if (!dmabuf->virt) {
+			kfree(dmabuf);
+			goto out_fail;
+		}
+		memset(dmabuf->virt, 0, hw_page_size);
+		dmabuf->buffer_tag = x;
+		list_add_tail(&dmabuf->list, &queue->page_list);
+		/* initialize queue's entry array */
+		dma_pointer = dmabuf->virt;
+		for (; total_qe_count < entry_count &&
+		     dma_pointer < (hw_page_size + dmabuf->virt);
+		     total_qe_count++, dma_pointer += entry_size) {
+			queue->qe[total_qe_count].address = dma_pointer;
+		}
+	}
+	queue->entry_size = entry_size;
+	queue->entry_count = entry_count;
+
+	/*
+	 * entry_repost is calculated based on the number of entries in the
+	 * queue. This works out except for RQs. If buffers are NOT initially
+	 * posted for every RQE, entry_repost should be adjusted accordingly.
+	 */
+	queue->entry_repost = (entry_count >> 3);
+	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
+		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
+	queue->phba = phba;
+
+	return queue;
+out_fail:
+	lpfc_sli4_queue_free(queue);
+	return NULL;
+}
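+
+/*
+ * Illustrative sketch, not part of the driver: the allocate-then-create
+ * pattern implied by lpfc_sli4_queue_alloc(). The helper name, the entry
+ * sizes/counts, the interrupt-per-second cap and the LPFC_WCQ/LPFC_FCP
+ * type values are assumptions for the example only; error unwinding of a
+ * partially created pair is omitted for brevity.
+ */
+#if 0
+static int lpfc_sli4_example_eq_cq_setup(struct lpfc_hba *phba)
+{
+	struct lpfc_queue *eq, *cq;
+	uint32_t rc;
+
+	/* Host memory for the queue entries is allocated first */
+	eq = lpfc_sli4_queue_alloc(phba, 4 /* EQE bytes */, 1024);
+	if (!eq)
+		return -ENOMEM;
+	cq = lpfc_sli4_queue_alloc(phba, 16 /* CQE bytes */, 1024);
+	if (!cq) {
+		lpfc_sli4_queue_free(eq);
+		return -ENOMEM;
+	}
+
+	/* Then the queues are created on the port; the CQ binds to its EQ */
+	rc = lpfc_eq_create(phba, eq, 1000 /* interrupts per second cap */);
+	if (!rc)
+		rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
+	return rc ? -ENXIO : 0;
+}
+#endif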
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupt per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
+{
+	struct lpfc_mbx_eq_create *eq_create;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint16_t dmult;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!eq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	eq_create = &mbox->u.mqe.un.eq_create;
+	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+	       eq->page_count);
+	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+	       LPFC_EQE_SIZE);
+	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+	/* Calculate delay multiplier from maximum interrupts per second */
+	dmult = LPFC_DMULT_CONST/imax - 1;
+	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+	       dmult);
+	switch (eq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0360 Unsupported EQ count. (%d)\n",
+				eq->entry_count);
+		if (eq->entry_count < 256) {
+			/* Free the mailbox before bailing out */
+			mempool_free(mbox, phba->mbox_mem_pool);
+			return -EINVAL;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 256:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_1024);
+		break;
+	case 2048:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_2048);
+		break;
+	case 4096:
+		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+		       LPFC_EQ_CNT_4096);
+		break;
+	}
+	list_for_each_entry(dmabuf, &eq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mbox->context1 = NULL;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2500 EQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	eq->type = LPFC_EQ;
+	eq->subtype = LPFC_NONE;
+	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+	if (eq->queue_id == 0xFFFF)
+		status = -ENXIO;
+	eq->host_index = 0;
+	eq->hba_index = 0;
+
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
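+
+/*
+ * Illustrative note, not part of the driver: the delay multiplier written
+ * into the EQ context above is derived from the caller's interrupt cap as
+ *
+ *	dmult = LPFC_DMULT_CONST / imax - 1
+ *
+ * so a smaller @imax (fewer interrupts per second allowed) produces a
+ * larger multiplier and hence more interrupt coalescing on that EQ.
+ */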
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ *
+ * This function creates a completion queue, as detailed in @cq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to. This
+ * function will send the CQ_CREATE mailbox command to the HBA to setup the
+ * completion queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+	struct lpfc_mbx_cq_create *cq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!cq || !eq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	cq_create = &mbox->u.mqe.un.cq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+		    cq->page_count);
+	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.cqv);
+	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
+		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+		       eq->queue_id);
+	} else {
+		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+		       eq->queue_id);
+	}
+	switch (cq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0361 Unsupported CQ count. (%d)\n",
+				cq->entry_count);
+		if (cq->entry_count < 256) {
+			/* Free the mailbox before bailing out */
+			status = -EINVAL;
+			goto out;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 256:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_256);
+		break;
+	case 512:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_512);
+		break;
+	case 1024:
+		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+		       LPFC_CQ_CNT_1024);
+		break;
+	}
+	list_for_each_entry(dmabuf, &cq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2501 CQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	if (cq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	/* link the cq onto the parent eq child list */
+	list_add_tail(&cq->list, &eq->child_list);
+	/* Set up completion queue's type and subtype */
+	cq->type = type;
+	cq->subtype = subtype;
+	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+	cq->assoc_qid = eq->queue_id;
+	cq->host_index = 0;
+	cq->hba_index = 0;
+
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mailbox queue.
+ *
+ * This function provides fallback (fb) functionality when the
+ * mq_create_ext fails on older FW generations. Its purpose is otherwise
+ * identical to mq_create_ext.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_dmabuf *dmabuf;
+	int length;
+
+	length = (sizeof(struct lpfc_mbx_mq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	mq_create = &mbox->u.mqe.un.mq_create;
+	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+	       mq->page_count);
+	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+	switch (mq->entry_count) {
+	case 16:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+			putPaddrLow(dmabuf->phys);
+		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+			putPaddrHigh(dmabuf->phys);
+	}
+}
+
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mailbox queue.
+ * @subtype: The queue's subtype.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to setup the
+ * mailbox queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_mq_create *mq_create;
+	struct lpfc_mbx_mq_create_ext *mq_create_ext;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!mq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+	bf_set(lpfc_mbx_mq_create_ext_num_pages,
+	       &mq_create_ext->u.request, mq->page_count);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.mqv);
+	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+		       cq->queue_id);
+	else
+		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+		       cq->queue_id);
+	switch (mq->entry_count) {
+	default:
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0362 Unsupported MQ count. (%d)\n",
+				mq->entry_count);
+		if (mq->entry_count < 16) {
+			/* Free the mailbox before bailing out */
+			status = -EINVAL;
+			goto out;
+		}
+		/* otherwise default to smallest count (drop through) */
+	case 16:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_16);
+		break;
+	case 32:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_32);
+		break;
+	case 64:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_64);
+		break;
+	case 128:
+		bf_set(lpfc_mq_context_ring_size,
+		       &mq_create_ext->u.request.context,
+		       LPFC_MQ_RING_SIZE_128);
+		break;
+	}
+	list_for_each_entry(dmabuf, &mq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+			      &mq_create_ext->u.response);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2795 MQ_CREATE_EXT failed with "
+				"status x%x. Failback to MQ_CREATE.\n",
+				rc);
+		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+		mq_create = &mbox->u.mqe.un.mq_create;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+				      &mq_create->u.response);
+	}
+
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2502 MQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	if (mq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	mq->type = LPFC_MQ;
+	mq->assoc_qid = cq->queue_id;
+	mq->subtype = subtype;
+	mq->host_index = 0;
+	mq->hba_index = 0;
+
+	/* link the mq onto the parent cq child list */
+	list_add_tail(&mq->list, &cq->child_list);
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
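+
+/*
+ * Illustrative sketch, not part of the driver: creating the mailbox queue
+ * and binding it to its completion queue as described above. The helper
+ * name, the 256-byte/16-entry geometry and the LPFC_MBOX subtype are
+ * assumptions for the example only; lpfc_mq_create() itself falls back
+ * from MQ_CREATE_EXT to plain MQ_CREATE on older firmware.
+ */
+#if 0
+static int lpfc_sli4_example_mq_setup(struct lpfc_hba *phba,
+				      struct lpfc_queue *cq,
+				      struct lpfc_queue **mqp)
+{
+	struct lpfc_queue *mq;
+
+	mq = lpfc_sli4_queue_alloc(phba, 256 /* MQE bytes */, 16);
+	if (!mq)
+		return -ENOMEM;
+	/* On success the MQ is linked onto cq->child_list */
+	if (lpfc_mq_create(phba, mq, cq, LPFC_MBOX)) {
+		lpfc_sli4_queue_free(mq);
+		return -ENXIO;
+	}
+	*mqp = mq;
+	return 0;
+}
+#endif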
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to setup the
+ * work queue. This function is synchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_wq_create *wq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+	struct dma_address *page;
+
+	/* sanity check on queue memory */
+	if (!wq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	wq_create = &mbox->u.mqe.un.wq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+		    wq->page_count);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+		    cq->queue_id);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.wqv);
+	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+		       wq->entry_count);
+		switch (wq->entry_size) {
+		default:
+		case 64:
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_64);
+			break;
+		case 128:
+			bf_set(lpfc_mbx_wq_create_wqe_size,
+			       &wq_create->u.request_1,
+			       LPFC_WQ_WQE_SIZE_128);
+			break;
+		}
+		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+		page = wq_create->u.request_1.page;
+	} else {
+		page = wq_create->u.request.page;
+	}
+	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2503 WQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+	if (wq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	wq->type = LPFC_WQ;
+	wq->assoc_qid = cq->queue_id;
+	wq->subtype = subtype;
+	wq->host_index = 0;
+	wq->hba_index = 0;
+	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
+
+	/* link the wq onto the parent cq child list */
+	list_add_tail(&wq->list, &cq->child_list);
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
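+
+/*
+ * Illustrative sketch, not part of the driver: creating a work queue and
+ * binding it to an already-created completion queue as described above.
+ * The helper name, the 64-byte/256-entry geometry and the LPFC_FCP
+ * subtype are assumptions for the example only.
+ */
+#if 0
+static int lpfc_sli4_example_wq_setup(struct lpfc_hba *phba,
+				      struct lpfc_queue *cq,
+				      struct lpfc_queue **wqp)
+{
+	struct lpfc_queue *wq;
+
+	wq = lpfc_sli4_queue_alloc(phba, 64 /* WQE bytes */, 256);
+	if (!wq)
+		return -ENOMEM;
+	/* On success the WQ is linked onto cq->child_list */
+	if (lpfc_wq_create(phba, wq, cq, LPFC_FCP)) {
+		lpfc_sli4_queue_free(wq);
+		return -ENXIO;
+	}
+	*wqp = wq;
+	return 0;
+}
+#endif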
+
+/**
+ * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @rq:   The queue structure to use for the receive queue.
+ * @qno:  The associated HBQ number
+ *
+ * For SLI4 we need to adjust the RQ repost value based on
+ * the number of buffers that are initially posted to the RQ.
+ */
+void
+lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
+{
+	uint32_t cnt;
+
+	/* sanity check on queue memory */
+	if (!rq)
+		return;
+	cnt = lpfc_hbq_defs[qno]->entry_count;
+
+	/* Recalc repost for RQs based on buffers initially posted */
+	cnt = (cnt >> 3);
+	if (cnt < LPFC_QUEUE_MIN_REPOST)
+		cnt = LPFC_QUEUE_MIN_REPOST;
+
+	rq->entry_repost = cnt;
+}
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind these receive queues to.
+ * @subtype: The subtype of the queues, indicating their functionality.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * struct is used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup the
+ * receive queue pair. This function is synchronous and will wait for the
+ * mailbox command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_rq_create *rq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+	/* sanity check on queue memory */
+	if (!hrq || !drq || !cq)
+		return -ENODEV;
+	if (!phba->sli4_hba.pc_sli4_params.supported)
+		hw_page_size = SLI4_PAGE_SIZE;
+
+	if (hrq->entry_count != drq->entry_count)
+		return -EINVAL;
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	rq_create = &mbox->u.mqe.un.rq_create;
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context,
+		       hrq->entry_count);
+		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+		bf_set(lpfc_rq_context_rqe_size,
+		       &rq_create->u.request.context,
+		       LPFC_RQE_SIZE_8);
+		bf_set(lpfc_rq_context_page_size,
+		       &rq_create->u.request.context,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+	} else {
+		switch (hrq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2535 Unsupported RQ count. (%d)\n",
+					hrq->entry_count);
+			if (hrq->entry_count < 512) {
+				/* Free the mailbox before bailing out */
+				status = -EINVAL;
+				goto out;
+			}
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+		       LPFC_HDR_BUF_SIZE);
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       hrq->page_count);
+	list_for_each_entry(dmabuf, &hrq->page_list, list) {
+		memset(dmabuf->virt, 0, hw_page_size);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2504 RQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (hrq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	hrq->type = LPFC_HRQ;
+	hrq->assoc_qid = cq->queue_id;
+	hrq->subtype = subtype;
+	hrq->host_index = 0;
+	hrq->hba_index = 0;
+
+	/* now create the data queue */
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbox_hdr_version, &shdr->request,
+	       phba->sli4_hba.pc_sli4_params.rqv);
+	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+		bf_set(lpfc_rq_context_rqe_count_1,
+		       &rq_create->u.request.context, hrq->entry_count);
+		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
+		       LPFC_RQE_SIZE_8);
+		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
+		       (PAGE_SIZE/SLI4_PAGE_SIZE));
+	} else {
+		switch (drq->entry_count) {
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2536 Unsupported RQ count. (%d)\n",
+					drq->entry_count);
+			if (drq->entry_count < 512) {
+				/* Free the mailbox before bailing out */
+				status = -EINVAL;
+				goto out;
+			}
+			/* otherwise default to smallest count (drop through) */
+		case 512:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_512);
+			break;
+		case 1024:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_1024);
+			break;
+		case 2048:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_2048);
+			break;
+		case 4096:
+			bf_set(lpfc_rq_context_rqe_count,
+			       &rq_create->u.request.context,
+			       LPFC_RQ_RING_SIZE_4096);
+			break;
+		}
+		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+		       LPFC_DATA_BUF_SIZE);
+	}
+	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+	       cq->queue_id);
+	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+	       drq->page_count);
+	list_for_each_entry(dmabuf, &drq->page_list, list) {
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+	if (drq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	drq->type = LPFC_DRQ;
+	drq->assoc_qid = cq->queue_id;
+	drq->subtype = subtype;
+	drq->host_index = 0;
+	drq->hba_index = 0;
+
+	/* link the header and data RQs onto the parent cq child list */
+	list_add_tail(&hrq->list, &cq->child_list);
+	list_add_tail(&drq->list, &cq->child_list);
+
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
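+
+/*
+ * Illustrative sketch, not part of the driver: creating a header/data
+ * receive queue pair bound to a CQ and then adjusting the repost threshold
+ * as described above. The helper name, the HBQ number 0 and the LPFC_USOL
+ * subtype are assumptions for the example only; the queues passed in are
+ * assumed to have been allocated with matching entry counts.
+ */
+#if 0
+static int lpfc_sli4_example_rq_setup(struct lpfc_hba *phba,
+				      struct lpfc_queue *cq,
+				      struct lpfc_queue *hrq,
+				      struct lpfc_queue *drq)
+{
+	/* hrq and drq must share the same entry_count or -EINVAL results */
+	if (lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL))
+		return -ENXIO;
+
+	/* Recompute entry_repost from the buffers initially posted */
+	lpfc_rq_adjust_repost(phba, hrq, 0);
+	lpfc_rq_adjust_repost(phba, drq, 0);
+	return 0;
+}
+#endif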
+
+/**
+ * lpfc_eq_destroy - Destroy an Event Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue was created on.
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!eq)
+		return -ENODEV;
+	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_eq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_EQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+	       eq->queue_id);
+	mbox->vport = eq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2505 EQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+
+	/* Remove eq from any list */
+	list_del_init(&eq->list);
+	mempool_free(mbox, eq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue was created on.
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!cq)
+		return -ENODEV;
+	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_cq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_CQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+	       cq->queue_id);
+	mbox->vport = cq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2506 CQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove cq from any list */
+	list_del_init(&cq->list);
+	mempool_free(mbox, cq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue was created on.
+ * @mq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!mq)
+		return -ENODEV;
+	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_mq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_MQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+	       mq->queue_id);
+	mbox->vport = mq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2507 MQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove mq from any list */
+	list_del_init(&mq->list);
+	mempool_free(mbox, mq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue was created on.
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!wq)
+		return -ENODEV;
+	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+	       wq->queue_id);
+	mbox->vport = wq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2508 WQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	/* Remove wq from any list */
+	list_del_init(&wq->list);
+	mempool_free(mbox, wq->phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
+ * @phba: HBA structure that indicates the port the queues were created on.
+ * @hrq: The header receive queue structure of the pair to destroy.
+ * @drq: The data receive queue structure of the pair to destroy.
+ *
+ * This function destroys a receive queue pair, as detailed in @hrq and @drq,
+ * by sending a mailbox command, specific to the type of queue, to the HBA.
+ *
+ * The @hrq and @drq structs are used to get the queue IDs of the queues
+ * to destroy.
+ *
+ * On success this function will return a zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+		struct lpfc_queue *drq)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* sanity check on queue memory */
+	if (!hrq || !drq)
+		return -ENODEV;
+	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_rq_destroy) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+			 length, LPFC_SLI4_MBX_EMBED);
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       hrq->queue_id);
+	mbox->vport = hrq->phba->pport;
+	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2509 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mbox, hrq->phba->mbox_mem_pool);
+		return -ENXIO;
+	}
+	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+	       drq->queue_id);
+	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2510 RQ_DESTROY mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+	}
+	list_del_init(&hrq->list);
+	list_del_init(&drq->list);
+	mempool_free(mbox, hrq->phba->mbox_mem_pool);
+	return status;
+}
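+
+/*
+ * Illustrative sketch, not part of the driver: the teardown order implied
+ * by the destroy routines above. Child queues are destroyed before their
+ * parent CQ, the CQ before its parent EQ, and only then is the host-side
+ * memory released with lpfc_sli4_queue_free(). The helper name is an
+ * assumption for the example only.
+ */
+#if 0
+static void lpfc_sli4_example_teardown(struct lpfc_hba *phba,
+				       struct lpfc_queue *eq,
+				       struct lpfc_queue *cq,
+				       struct lpfc_queue *wq)
+{
+	/* Destroy on the port from leaf to root of the child lists */
+	lpfc_wq_destroy(phba, wq);
+	lpfc_cq_destroy(phba, cq);
+	lpfc_eq_destroy(phba, eq);
+
+	/* Free the DMAable pages and queue structures last */
+	lpfc_sli4_queue_free(wq);
+	lpfc_sli4_queue_free(cq);
+	lpfc_sli4_queue_free(eq);
+}
+#endif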
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: The HBA for which this call is being executed.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine will post the sgl pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * If the caller has fewer than 256 scatter gather segments to map, then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segments, then
+ * pdma_phys_addr1 should be a valid physical address.
+ * Physical addresses for SGLs must be 64-byte aligned.
+ * If two SGL pages are mapped, the first one must have 256 entries and the
+ * second can have between 1 and 256 entries.
+ *
+ * Return codes:
+ * 	0 - Success
+ * 	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+		dma_addr_t pdma_phys_addr0,
+		dma_addr_t pdma_phys_addr1,
+		uint16_t xritag)
+{
+	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t mbox_tmo;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (xritag == NO_XRI) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0364 Invalid param:\n");
+		return -EINVAL;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+			sizeof(struct lpfc_mbx_post_sgl_pages) -
+			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+				&mbox->u.mqe.un.post_sgl_pages;
+	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
+				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2511 POST_SGL mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_xri - Get an available xri in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available xri from the
+ * driver's xri bitmask, consistent with the SLI-4 interface spec. The
+ * xri is a logical index, so the search starts at 0 each time.
+ *
+ * Returns
+ *	An available xri in the range 0 <= xri < max_xri if successful
+ *	NO_XRI if no xris are available.
+ **/
+uint16_t
+lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
+{
+	unsigned long xri;
+
+	/*
+	 * Fetch the next logical xri.  Because this index is logical,
+	 * the driver starts at 0 each time.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
+				 phba->sli4_hba.max_cfg_param.max_xri, 0);
+	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
+		spin_unlock_irq(&phba->hbalock);
+		return NO_XRI;
+	} else {
+		set_bit(xri, phba->sli4_hba.xri_bmask);
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		phba->sli4_hba.xri_count++;
+	}
+
+	spin_unlock_irq(&phba->hbalock);
+	return xri;
+}
+
+/**
+ * __lpfc_sli4_free_xri - Release an xri for reuse, hbalock held.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver. The caller must hold
+ * the hbalock.
+ **/
+void
+__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
+		phba->sli4_hba.xri_count--;
+		phba->sli4_hba.max_cfg_param.xri_used--;
+	}
+}
+
+/**
+ * lpfc_sli4_free_xri - Release an xri for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @xri: the xri to release.
+ *
+ * This routine is invoked to release an xri to the pool of
+ * available xris maintained by the driver.
+ **/
+void
+lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
+{
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_sli4_free_xri(phba, xri);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will return NO_XRI (0xffff); otherwise it returns the newly allocated
+ * xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+	uint16_t xri_index;
+
+	xri_index = lpfc_sli4_alloc_xri(phba);
+	if (xri_index != NO_XRI)
+		return xri_index;
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2004 Failed to allocate XRI.last XRITAG is %d"
+			" Max XRI is %d, Used XRI is %d\n",
+			xri_index,
+			phba->sli4_hba.max_cfg_param.max_xri,
+			phba->sli4_hba.max_cfg_param.xri_used);
+	return NO_XRI;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0, lxri = 0;
+	int els_xri_cnt, rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* The number of sgls to be posted */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
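+	/* The request carries one page-pair descriptor per ELS XRI plus the
+	 * config header and an extra word.
+	 */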
+	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2559 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+			 LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0285 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	viraddr = mbox->sge_array->addr[0];
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
+		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+
+		/*
+		 * Assign the sglq a physical xri only if the driver has not
+		 * initialized those resources.  A port reset only needs
+		 * the sglq's posted.
+		 */
+		if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+		    LPFC_XRI_RSRC_RDY) {
+			lxri = lpfc_sli4_next_xritag(phba);
+			if (lxri == NO_XRI) {
+				lpfc_sli4_mbox_cmd_free(phba, mbox);
+				return -ENOMEM;
+			}
+			sglq_entry->sli4_lxritag = lxri;
+			sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		}
+
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(0));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(0));
+
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = sglq_entry->sli4_xritag;
+		sgl_pg_pairs++;
+	}
+
+	/* Complete initialization and perform endian conversion. */
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
+	sgl->word0 = cpu_to_le32(sgl->word0);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2513 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+
+	if (rc == 0)
+		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_XRI_RSRC_RDY);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, index;
+	uint32_t mbox_tmo;
+	uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
+	uint16_t xritag_start = 0, lxri = 0;
+	struct lpfc_rsrc_blks *rsrc_blk;
+	int cnt, ttl_cnt, rc = 0;
+	int loop_cnt;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* The number of sgls to be posted */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2989 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+
+	cnt = 0;
+	ttl_cnt = 0;
+	post_els_xri_cnt = els_xri_cnt;
+	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+			    list) {
+		rsrc_start = rsrc_blk->rsrc_start;
+		rsrc_size = rsrc_blk->rsrc_size;
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3014 Working ELS Extent start %d, cnt %d\n",
+				rsrc_start, rsrc_size);
+
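+		/* Post no more SGLs from this extent than it has resources;
+		 * any remainder is carried over to the next extent block.
+		 */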
+		loop_cnt = min(post_els_xri_cnt, rsrc_size);
+		if (loop_cnt < post_els_xri_cnt) {
+			post_els_xri_cnt -= loop_cnt;
+			ttl_cnt += loop_cnt;
+		} else
+			ttl_cnt += post_els_xri_cnt;
+
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox)
+			return -ENOMEM;
+		/*
+		 * Allocate DMA memory and set up the non-embedded mailbox
+		 * command.
+		 */
+		alloclen = lpfc_sli4_config(phba, mbox,
+					LPFC_MBOX_SUBSYSTEM_FCOE,
+					LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+					reqlen, LPFC_SLI4_MBX_NEMBED);
+		if (alloclen < reqlen) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2987 Allocated DMA memory size (%d) "
+					"is less than the requested DMA memory "
+					"size (%d)\n", alloclen, reqlen);
+			lpfc_sli4_mbox_cmd_free(phba, mbox);
+			return -ENOMEM;
+		}
+
+		/* Set up the SGL pages in the non-embedded DMA pages */
+		viraddr = mbox->sge_array->addr[0];
+		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+		sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+		/*
+		 * The starting resource may not begin at zero. Control
+		 * the loop variables via the block resource parameters,
+		 * but handle the sge pointers with a zero-based index
+		 * that doesn't get reset per loop pass.
+		 */
+		for (index = rsrc_start;
+		     index < rsrc_start + loop_cnt;
+		     index++) {
+			sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
+
+			/*
+			 * Assign the sglq a physical xri only if the driver
+			 * has not initialized those resources.  A port reset
+			 * only needs the sglq's posted.
+			 */
+			if (bf_get(lpfc_xri_rsrc_rdy,
+				   &phba->sli4_hba.sli4_flags) !=
+				   LPFC_XRI_RSRC_RDY) {
+				lxri = lpfc_sli4_next_xritag(phba);
+				if (lxri == NO_XRI) {
+					lpfc_sli4_mbox_cmd_free(phba, mbox);
+					rc = -ENOMEM;
+					goto err_exit;
+				}
+				sglq_entry->sli4_lxritag = lxri;
+				sglq_entry->sli4_xritag =
+						phba->sli4_hba.xri_ids[lxri];
+			}
+
+			/* Set up the sge entry */
+			sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(sglq_entry->phys));
+			sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+			sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(0));
+			sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(0));
+
+			/* Track the starting physical XRI for the mailbox. */
+			if (index == rsrc_start)
+				xritag_start = sglq_entry->sli4_xritag;
+			sgl_pg_pairs++;
+			cnt++;
+		}
+
+		/* Complete initialization and perform endian conversion. */
+		rsrc_blk->rsrc_used += loop_cnt;
+		bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+		bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
+		sgl->word0 = cpu_to_le32(sgl->word0);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3015 Post ELS Extent SGL, start %d, "
+				"cnt %d, used %d\n",
+				xritag_start, loop_cnt, rsrc_blk->rsrc_used);
+		if (!phba->sli4_hba.intr_enable)
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		else {
+			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+		}
+		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status,
+				     &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (rc != MBX_TIMEOUT)
+			lpfc_sli4_mbox_cmd_free(phba, mbox);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2988 POST_SGL_BLOCK mailbox "
+					"command failed status x%x "
+					"add_status x%x mbx status x%x\n",
+					shdr_status, shdr_add_status, rc);
+			rc = -ENXIO;
+			goto err_exit;
+		}
+		if (ttl_cnt >= els_xri_cnt)
+			break;
+	}
+
+ err_exit:
+	if (rc == 0)
+		bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+		       LPFC_XRI_RSRC_RDY);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
+			      int cnt)
+{
+	struct lpfc_scsi_buf *psb;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	dma_addr_t pdma_phys_bpl1;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* Calculate the requested length of the dma memory */
+	reqlen = cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"0217 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0283 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+				LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2561 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	pg_pairs = 0;
+	list_for_each_entry(psb, sblist, list) {
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
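+		/* A second SGL page is only needed when the per-command SGL
+		 * spills past the first page.
+		 */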
+		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+		else
+			pdma_phys_bpl1 = 0;
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = psb->cur_iocbq.sli4_xritag;
+		sgl_pg_pairs++;
+		pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+	/* Perform endian conversion if necessary */
+	sgl->word0 = cpu_to_le32(sgl->word0);
+
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2564 POST_SGL_BLOCK mailbox command failed "
+				"status x%x add_status x%x mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
+				int cnt)
+{
+	struct lpfc_scsi_buf *psb = NULL;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xri_start = 0, scsi_xri_start;
+	uint16_t rsrc_range;
+	int rc = 0, avail_cnt;
+	uint32_t shdr_status, shdr_add_status;
+	dma_addr_t pdma_phys_bpl1;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_rsrc_blks *rsrc_blk;
+	uint32_t xri_cnt = 0;
+
+	/* Calculate the total requested length of the dma memory */
+	reqlen = cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > SLI4_PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2932 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+
+	/*
+	 * The use of extents requires the driver to post the sgl headers
+	 * in multiple postings to meet the contiguous resource assignment.
+	 */
+	psb = list_prepare_entry(psb, sblist, list);
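+	/* psb is primed above so list_for_each_entry_continue() can resume
+	 * the SCSI buffer walk where the previous extent left off.
+	 */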
+	scsi_xri_start = phba->sli4_hba.scsi_xri_start;
+	list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
+			    list) {
+		rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
+		if (rsrc_range < scsi_xri_start)
+			continue;
+		else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
+			continue;
+		else
+			avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
+
+		reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
+			sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+		/*
+		 * Allocate DMA memory and set up the non-embedded mailbox
+		 * command. The mbox is used to post an SGL page per loop
+		 * but the DMA memory has a use-once semantic so the mailbox
+		 * is used and freed per loop pass.
+		 */
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2933 Failed to allocate mbox cmd "
+					"memory\n");
+			return -ENOMEM;
+		}
+		alloclen = lpfc_sli4_config(phba, mbox,
+					LPFC_MBOX_SUBSYSTEM_FCOE,
+					LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+					reqlen,
+					LPFC_SLI4_MBX_NEMBED);
+		if (alloclen < reqlen) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2934 Allocated DMA memory size (%d) "
+					"is less than the requested DMA memory "
+					"size (%d)\n", alloclen, reqlen);
+			lpfc_sli4_mbox_cmd_free(phba, mbox);
+			return -ENOMEM;
+		}
+
+		/* Get the first SGE entry from the non-embedded DMA memory */
+		viraddr = mbox->sge_array->addr[0];
+
+		/* Set up the SGL pages in the non-embedded DMA pages */
+		sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+		sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+		/* pg_pairs tracks posted SGEs per loop iteration. */
+		pg_pairs = 0;
+		list_for_each_entry_continue(psb, sblist, list) {
+			/* Set up the sge entry */
+			sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+			sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+			if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+				pdma_phys_bpl1 = psb->dma_phys_bpl +
+					SGL_PAGE_SIZE;
+			else
+				pdma_phys_bpl1 = 0;
+			sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+			sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+			/* Keep the first xri for this extent. */
+			if (pg_pairs == 0)
+				xri_start = psb->cur_iocbq.sli4_xritag;
+			sgl_pg_pairs++;
+			pg_pairs++;
+			xri_cnt++;
+
+			/*
+			 * Track two exit conditions - the loop has constructed
+			 * all of the caller's SGE pairs or all available
+			 * resource IDs in this extent are consumed.
+			 */
+			if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
+				break;
+		}
+		rsrc_blk->rsrc_used += pg_pairs;
+		bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
+		bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"3016 Post SCSI Extent SGL, start %d, cnt %d "
+				"blk use %d\n",
+				xri_start, pg_pairs, rsrc_blk->rsrc_used);
+		/* Perform endian conversion if necessary */
+		sgl->word0 = cpu_to_le32(sgl->word0);
+		if (!phba->sli4_hba.intr_enable)
+			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+		else {
+			mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+			rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+		}
+		shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (rc != MBX_TIMEOUT)
+			lpfc_sli4_mbox_cmd_free(phba, mbox);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2935 POST_SGL_BLOCK mailbox command "
+					"failed status x%x add_status x%x "
+					"mbx status x%x\n",
+					shdr_status, shdr_add_status, rc);
+			return -ENXIO;
+		}
+
+		/* Post only what is requested. */
+		if (xri_cnt >= cnt)
+			break;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function will
+ * return a zero if the frame is a valid frame or a non zero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+	/*  make rctl_names static to save stack space */
+	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
+	char *type_names[] = FC_TYPE_NAMES_INIT;
+	struct fc_vft_header *fc_vft_hdr;
+	uint32_t *header = (uint32_t *) fc_hdr;
+
+	switch (fc_hdr->fh_r_ctl) {
+	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
+	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
+	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
+	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
+	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
+	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
+	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
+	case FC_RCTL_DD_CMD_STATUS:	/* command status */
+	case FC_RCTL_ELS_REQ:	/* extended link services request */
+	case FC_RCTL_ELS_REP:	/* extended link services reply */
+	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
+	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
+	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
+	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
+	case FC_RCTL_BA_RMC: 	/* remove connection */
+	case FC_RCTL_BA_ACC:	/* basic accept */
+	case FC_RCTL_BA_RJT:	/* basic reject */
+	case FC_RCTL_BA_PRMT:
+	case FC_RCTL_ACK_1:	/* acknowledge_1 */
+	case FC_RCTL_ACK_0:	/* acknowledge_0 */
+	case FC_RCTL_P_RJT:	/* port reject */
+	case FC_RCTL_F_RJT:	/* fabric reject */
+	case FC_RCTL_P_BSY:	/* port busy */
+	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
+	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
+	case FC_RCTL_LCR:	/* link credit reset */
+	case FC_RCTL_END:	/* end */
+		break;
+	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
+		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+		return lpfc_fc_frame_check(phba, fc_hdr);
+	default:
+		goto drop;
+	}
+	switch (fc_hdr->fh_type) {
+	case FC_TYPE_BLS:
+	case FC_TYPE_ELS:
+	case FC_TYPE_FCP:
+	case FC_TYPE_CT:
+		break;
+	case FC_TYPE_IP:
+	case FC_TYPE_ILS:
+	default:
+		goto drop;
+	}
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"2538 Received frame rctl:%s type:%s "
+			"Frame Data:%08x %08x %08x %08x %08x %08x\n",
+			rctl_names[fc_hdr->fh_r_ctl],
+			type_names[fc_hdr->fh_type],
+			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+			be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+	return 0;
+drop:
+	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+			"2539 Dropped frame rctl:%s type:%s\n",
+			rctl_names[fc_hdr->fh_r_ctl],
+			type_names[fc_hdr->fh_type]);
+	return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the VF
+ * header, if one exists. This function will return the VFI if one exists
+ * or 0 if no VSAN Header exists.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+		return 0;
+	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FC Fabric ID that the frame came from
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+		       uint16_t fcfi)
+{
+	struct lpfc_vport **vports;
+	struct lpfc_vport *vport = NULL;
+	int i;
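+	/* Reassemble the 24-bit destination ID (D_ID) from the header bytes. */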
+	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
+			fc_hdr->fh_d_id[1] << 8 |
+			fc_hdr->fh_d_id[2]);
+	if (did == Fabric_DID)
+		return phba->pport;
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports != NULL)
+		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+			if (phba->fcf.fcfi == fcfi &&
+			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+			    vports[i]->fc_myDID == did) {
+				vport = vports[i];
+				break;
+			}
+		}
+	lpfc_destroy_vport_work_array(phba, vports);
+	return vport;
+}
+
+/**
+ * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
+ * @vport: The vport to work on.
+ *
+ * This function updates the receive sequence time stamp for this vport. The
+ * receive sequence time stamp indicates the time that the last frame of the
+ * sequence that has been idle for the longest amount of time was received.
+ * The driver uses this time stamp to determine if any received sequences
+ * have timed out.
+ **/
+void
+lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* get the oldest sequence on the rcv list */
+	h_buf = list_get_first(&vport->rcv_buffer_list,
+			       struct lpfc_dmabuf, list);
+	if (!h_buf)
+		return;
+	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
+}
+
+/**
+ * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function cleans up all outstanding received sequences. This is called
+ * by the driver when a link event or user action invalidates all the received
+ * sequences.
+ **/
+void
+lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+}
+
+/**
+ * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
+ * @vport: The vport that the received sequences were sent to.
+ *
+ * This function determines whether any received sequences have timed out by
+ * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
+ * indicates that there is at least one timed out sequence this routine will
+ * go through the received sequences one at a time from most inactive to most
+ * active to determine which ones need to be cleaned up. Once it has determined
+ * that a sequence needs to be cleaned up it will simply free up the resources
+ * without sending an abort.
+ **/
+void
+lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
+{
+	struct lpfc_dmabuf *h_buf, *hnext;
+	struct lpfc_dmabuf *d_buf, *dnext;
+	struct hbq_dmabuf *dmabuf = NULL;
+	unsigned long timeout;
+	int abort_count = 0;
+
+	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+		   vport->rcv_buffer_time_stamp);
+	if (list_empty(&vport->rcv_buffer_list) ||
+	    time_before(jiffies, timeout))
+		return;
+	/* start with the oldest sequence on the rcv list */
+	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
+		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
+			   dmabuf->time_stamp);
+		if (time_before(jiffies, timeout))
+			break;
+		abort_count++;
+		list_del_init(&dmabuf->hbuf.list);
+		list_for_each_entry_safe(d_buf, dnext,
+					 &dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
+	}
+	if (abort_count)
+		lpfc_update_rcv_time_stamp(vport);
+}
+
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: pointer to the vport on which this frame was received
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's
+ * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
+ * sequence list that the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+	struct hbq_dmabuf *temp_dmabuf = NULL;
+
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	dmabuf->time_stamp = jiffies;
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* Use the hdr_buf to find the sequence that this frame belongs to */
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+	if (!seq_dmabuf) {
+		/*
+		 * This indicates first frame received for this sequence.
+		 * Queue the buffer on the vport's rcv_buffer_list.
+		 */
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		lpfc_update_rcv_time_stamp(vport);
+		return dmabuf;
+	}
+	temp_hdr = seq_dmabuf->hbuf.virt;
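+	/* A frame with a lower sequence count than the current anchor
+	 * replaces it as the head of this sequence on the rcv_buffer_list.
+	 */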
+	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+		list_del_init(&seq_dmabuf->hbuf.list);
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		lpfc_update_rcv_time_stamp(vport);
+		return dmabuf;
+	}
+	/* move this sequence to the tail to indicate a young sequence */
+	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
+	seq_dmabuf->time_stamp = jiffies;
+	lpfc_update_rcv_time_stamp(vport);
+	if (list_empty(&seq_dmabuf->dbuf.list)) {
+		temp_hdr = dmabuf->hbuf.virt;
+		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+		return seq_dmabuf;
+	}
+	/* find the correct place in the sequence to insert this frame */
+	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+		/*
+		 * If the frame's sequence count is greater than the frame on
+		 * the list then insert the frame right after this frame
+		 */
+		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
+			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+			return seq_dmabuf;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the partially assembled sequence described
+ * by the information from the basic abort @dmabuf. It checks to see whether
+ * such a partially assembled sequence is held by the driver. If so, it frees
+ * up all the frames from the partially assembled sequence.
+ *
+ * Return
+ * true  -- if there is matching partially assembled sequence present and all
+ *          the frames freed with the sequence;
+ * false -- if there is no matching partially assembled sequence present so
+ *          nothing got aborted in the lower layer driver
+ **/
+static bool
+lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
+			    struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+
+	/* Use the hdr_buf to find the sequence that matches this frame */
+	INIT_LIST_HEAD(&dmabuf->dbuf.list);
+	INIT_LIST_HEAD(&dmabuf->hbuf.list);
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+
+	/* Free up all the frames from the partially assembled sequence */
+	if (seq_dmabuf) {
+		list_for_each_entry_safe(d_buf, n_buf,
+					 &seq_dmabuf->dbuf.list, list) {
+			list_del_init(&d_buf->list);
+			lpfc_in_buf_free(vport->phba, d_buf);
+		}
+		return true;
+	}
+	return false;
+}
+
+/**
+ * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
+ * @phba: Pointer to HBA context object.
+ * @cmd_iocbq: pointer to the command iocbq structure.
+ * @rsp_iocbq: pointer to the response iocbq structure.
+ *
+ * This function handles the sequence abort response iocb command complete
+ * event. It properly releases the memory allocated to the sequence abort
+ * accept iocb.
+ **/
+static void
+lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
+			     struct lpfc_iocbq *cmd_iocbq,
+			     struct lpfc_iocbq *rsp_iocbq)
+{
+	if (cmd_iocbq)
+		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+
+	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
+	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
+			rsp_iocbq->iocb.ulpStatus,
+			rsp_iocbq->iocb.un.ulpWord[4]);
+}
+
+/**
+ * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
+ * @phba: Pointer to HBA context object.
+ * @xri: xri id in transaction.
+ *
+ * This function validates that the xri maps to the known range of XRIs
+ * allocated and used by the driver.
+ **/
+uint16_t
+lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
+		      uint16_t xri)
+{
+	int i;
+
+	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
+		if (xri == phba->sli4_hba.xri_ids[i])
+			return i;
+	}
+	return NO_XRI;
+}
+
+
+/**
+ * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
+ * @phba: Pointer to HBA context object.
+ * @fc_hdr: pointer to a FC frame header.
+ *
+ * This function sends a basic response to a previous unsol sequence abort
+ * event after aborting the sequence handling.
+ **/
+static void
+lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
+			struct fc_frame_header *fc_hdr)
+{
+	struct lpfc_iocbq *ctiocb = NULL;
+	struct lpfc_nodelist *ndlp;
+	uint16_t oxid, rxid;
+	uint32_t sid, fctl;
+	IOCB_t *icmd;
+	int rc;
+
+	if (!lpfc_is_link_up(phba))
+		return;
+
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
+
+	ndlp = lpfc_findnode_did(phba->pport, sid);
+	if (!ndlp) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+				"1268 Find ndlp returned NULL for oxid:x%x "
+				"SID:x%x\n", oxid, sid);
+		return;
+	}
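+	/* If the ABTS RX_ID maps to a driver-owned XRI, mark an RRQ active
+	 * for this exchange.
+	 */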
+	if (lpfc_sli4_xri_inrange(phba, rxid))
+		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
+
+	/* Allocate buffer for rsp iocb */
+	ctiocb = lpfc_sli_get_iocbq(phba);
+	if (!ctiocb)
+		return;
+
+	/* Extract the F_CTL field from FC_HDR */
+	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
+
+	icmd = &ctiocb->iocb;
+	icmd->un.xseq64.bdl.bdeSize = 0;
+	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
+	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
+
+	/* Fill in the rest of iocb fields */
+	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
+	icmd->ulpBdeCount = 0;
+	icmd->ulpLe = 1;
+	icmd->ulpClass = CLASS3;
+	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+	ctiocb->context1 = ndlp;
+
+	ctiocb->iocb_cmpl = NULL;
+	ctiocb->vport = phba->pport;
+	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+	ctiocb->sli4_lxritag = NO_XRI;
+	ctiocb->sli4_xritag = NO_XRI;
+
+	/* If the oxid maps to the FCP XRI range or if it is out of range,
+	 * send a BLS_RJT.  The driver no longer has that exchange.
+	 * Override the IOCB for a BA_RJT.
+	 */
+	if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
+		    phba->sli4_hba.max_cfg_param.xri_base) ||
+	    oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
+		    phba->sli4_hba.max_cfg_param.xri_base)) {
+		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+	}
+
+	if (fctl & FC_FC_EX_CTX) {
+		/* ABTS sent by responder to CT exchange, construction
+		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
+		 * field and RX_ID from ABTS for RX_ID field.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+	} else {
+		/* ABTS sent by initiator to CT exchange, construction
+		 * of BA_ACC will need to allocate a new XRI as for the
+		 * XRI_TAG field.
+		 */
+		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+	}
+	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
+	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+	/* Xmit CT abts response on exchange <xid> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+			icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+				"2925 Failed to issue CT ABTS RSP x%x on "
+				"xri x%x, Data x%x\n",
+				icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+				phba->link_state);
+		lpfc_sli_release_iocbq(phba, ctiocb);
+	}
+}
+
+/**
+ * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an SLI-4 unsolicited abort event. If the unsolicited
+ * receive sequence is only partially assembled by the driver, it shall abort
+ * the partially assembled frames for the sequence. Otherwise, if the
+ * unsolicited receive sequence has been completely assembled and passed to
+ * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
+ * indicate that the unsolicited sequence has been aborted. After that, it
+ * issues a basic accept to acknowledge the abort.
+ **/
+void
+lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
+			     struct hbq_dmabuf *dmabuf)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_frame_header fc_hdr;
+	uint32_t fctl;
+	bool abts_par;
+
+	/* Make a copy of fc_hdr before the dmabuf being released */
+	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
+
+	if (fctl & FC_FC_EX_CTX) {
+		/*
+		 * ABTS sent by responder to exchange, just free the buffer
+		 */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+	} else {
+		/*
+		 * ABTS sent by initiator to exchange, need to do cleanup
+		 */
+		/* Try to abort partially assembled seq */
+		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+
+		/* Send abort to ULP if partially seq abort failed */
+		if (abts_par == false)
+			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
+		else
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+	}
+	/* Send basic accept (BA_ACC) to the abort requester */
+	lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three things: 1) that the first frame
+ * has a sequence count of zero; 2) that there is a frame with the last frame
+ * of sequence bit set; 3) that there are no holes in the sequence count. The
+ * function returns 1 when the sequence is complete, otherwise it returns 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *seq_dmabuf;
+	uint32_t fctl;
+	int seq_count = 0;
+
+	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* make sure first frame of sequence has a sequence count of zero */
+	if (hdr->fh_seq_cnt != seq_count)
+		return 0;
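+	/* Reassemble the 24-bit F_CTL field from the header bytes. */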
+	fctl = (hdr->fh_f_ctl[0] << 16 |
+		hdr->fh_f_ctl[1] << 8 |
+		hdr->fh_f_ctl[2]);
+	/* If last frame of sequence we can return success. */
+	if (fctl & FC_FC_END_SEQ)
+		return 1;
+	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+		/* If there is a hole in the sequence count then fail. */
+		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
+			return 0;
+		fctl = (hdr->fh_f_ctl[0] << 16 |
+			hdr->fh_f_ctl[1] << 8 |
+			hdr->fh_f_ctl[2]);
+		/* If last frame of sequence we can return success. */
+		if (fctl & FC_FC_END_SEQ)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * used to issue to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throws out the received frames that were not
+ * able to be described and returns a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+	struct hbq_dmabuf *hbq_buf;
+	struct lpfc_dmabuf *d_buf, *n_buf;
+	struct lpfc_iocbq *first_iocbq, *iocbq;
+	struct fc_frame_header *fc_hdr;
+	uint32_t sid;
+	uint32_t len, tot_len;
+	struct ulp_bde64 *pbde;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	/* remove from receive buffer list */
+	list_del_init(&seq_dmabuf->hbuf.list);
+	lpfc_update_rcv_time_stamp(vport);
+	/* get the Remote Port's SID */
+	sid = sli4_sid_from_fc_hdr(fc_hdr);
+	tot_len = 0;
+	/* Get an iocbq struct to fill in. */
+	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+	if (first_iocbq) {
+		/* Initialize the first IOCB. */
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
+		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+		first_iocbq->iocb.ulpContext = NO_XRI;
+		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+			be16_to_cpu(fc_hdr->fh_ox_id);
+		/* iocbq is prepped for internal consumption.  Physical vpi. */
+		first_iocbq->iocb.unsli3.rcvsli3.vpi =
+			vport->phba->vpi_ids[vport->vpi];
+		/* put the first buffer into the first IOCBq */
+		first_iocbq->context2 = &seq_dmabuf->dbuf;
+		first_iocbq->context3 = NULL;
+		first_iocbq->iocb.ulpBdeCount = 1;
+		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+		first_iocbq->iocb.un.rcvels.remoteID = sid;
+		tot_len = bf_get(lpfc_rcqe_length,
+				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+	}
+	iocbq = first_iocbq;
+	/*
+	 * Each IOCBq can have two Buffers assigned, so go through the list
+	 * of buffers for this sequence and save two buffers in each IOCBq
+	 */
+	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+		if (!iocbq) {
+			lpfc_in_buf_free(vport->phba, d_buf);
+			continue;
+		}
+		if (!iocbq->context3) {
+			iocbq->context3 = d_buf;
+			iocbq->iocb.ulpBdeCount++;
+			pbde = (struct ulp_bde64 *)
+					&iocbq->iocb.unsli3.sli3Words[4];
+			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+
+			/* We need to get the size out of the right CQE */
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			len = bf_get(lpfc_rcqe_length,
+				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
+			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+			tot_len += len;
+		} else {
+			iocbq = lpfc_sli_get_iocbq(vport->phba);
+			if (!iocbq) {
+				if (first_iocbq) {
+					first_iocbq->iocb.ulpStatus =
+							IOSTAT_FCP_RSP_ERROR;
+					first_iocbq->iocb.un.ulpWord[4] =
+							IOERR_NO_RESOURCES;
+				}
+				lpfc_in_buf_free(vport->phba, d_buf);
+				continue;
+			}
+			iocbq->context2 = d_buf;
+			iocbq->context3 = NULL;
+			iocbq->iocb.ulpBdeCount = 1;
+			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+							LPFC_DATA_BUF_SIZE;
+
+			/* We need to get the size out of the right CQE */
+			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+			len = bf_get(lpfc_rcqe_length,
+				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
+			tot_len += len;
+			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
+			iocbq->iocb.un.rcvels.remoteID = sid;
+			list_add_tail(&iocbq->list, &first_iocbq->list);
+		}
+	}
+	return first_iocbq;
+}
+
+static void
+lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
+			  struct hbq_dmabuf *seq_dmabuf)
+{
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
+	struct lpfc_hba *phba = vport->phba;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+	if (!iocbq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2707 Ring %d handler: Failed to allocate "
+				"iocb Rctl x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+		return;
+	}
+	if (!lpfc_complete_unsol_iocb(phba,
+				      &phba->sli.ring[LPFC_ELS_RING],
+				      iocbq, fc_hdr->fh_r_ctl,
+				      fc_hdr->fh_type))
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2540 Ring %d handler: unexpected Rctl "
+				"x%x Type x%x received\n",
+				LPFC_ELS_RING,
+				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+
+	/* Free iocb created in lpfc_prep_seq */
+	list_for_each_entry_safe(curr_iocb, next_iocb,
+		&iocbq->list, list) {
+		list_del_init(&curr_iocb->list);
+		lpfc_sli_release_iocbq(phba, curr_iocb);
+	}
+	lpfc_sli_release_iocbq(phba, iocbq);
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to the received hbq dmabuf.
+ *
+ * This function is called with no lock held. This function processes all
+ * the received buffers and gives it to upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers at interrupt contexts and adds
+ * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
+ * appropriate receive function when the final frame in a sequence is received.
+ **/
+void
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
+				 struct hbq_dmabuf *dmabuf)
+{
+	struct hbq_dmabuf *seq_dmabuf;
+	struct fc_frame_header *fc_hdr;
+	struct lpfc_vport *vport;
+	uint32_t fcfi;
+
+	/* Process each received buffer */
+	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* check to see if this is a valid type of frame */
+	if (lpfc_fc_frame_check(phba, fc_hdr)) {
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
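+	/* The FCF index is carried in a different field for RECEIVE_V1
+	 * completions, so check the CQE code first.
+	 */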
+	if ((bf_get(lpfc_cqe_code,
+		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
+		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+			      &dmabuf->cq_event.cqe.rcqe_cmpl);
+	else
+		fcfi = bf_get(lpfc_rcqe_fcf_id,
+			      &dmabuf->cq_event.cqe.rcqe_cmpl);
+	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+		/* throw out the frame */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* Handle the basic abort sequence (BA_ABTS) event */
+	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
+		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
+		return;
+	}
+
+	/* Link this frame */
+	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+	if (!seq_dmabuf) {
+		/* unable to add frame to vport - throw it out */
+		lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		return;
+	}
+	/* If not last frame in sequence continue processing frames. */
+	if (!lpfc_seq_complete(seq_dmabuf))
+		return;
+
+	/* Send the complete sequence to the upper layer protocol */
+	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
+}
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
+ *
+ * This routine does not require any locks.  Its usage is expected
+ * to be driver load or reset recovery when the driver is
+ * sequential.
+ *
+ * Return codes
+ * 	0 - successful
+ *      -EIO - The mailbox failed to complete successfully.
+ * 	When this error occurs, the driver is not guaranteed
+ *	to have any rpi regions posted to the device and
+ *	must either attempt to repost the regions or take a
+ *	fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+	struct lpfc_rpi_hdr *rpi_page;
+	uint32_t rc = 0;
+	uint16_t lrpi = 0;
+
+	/* SLI4 ports that support extents do not require RPI headers. */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		goto exit;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+		/*
+		 * Assign the rpi headers a physical rpi only if the driver
+		 * has not initialized those resources.  A port reset only
+		 * needs the headers posted.
+		 */
+		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
+		    LPFC_RPI_RSRC_RDY)
+			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+
+		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2008 Error %d posting all rpi "
+					"headers\n", rc);
+			rc = -EIO;
+			break;
+		}
+	}
+
+ exit:
+	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
+	       LPFC_RPI_RSRC_RDY);
+	return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page:  pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec.  This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ * 	0 - successful
+ * 	-ENOMEM - No available memory
+ *      -EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+	uint32_t rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* SLI4 ports that support extents do not require RPI headers. */
+	if (!phba->sli4_hba.rpi_hdrs_in_use)
+		return rc;
+	if (phba->sli4_hba.extents_in_use)
+		return -EIO;
+
+	/* The port is notified of the header region via a mailbox command. */
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2001 Unable to allocate memory for issuing "
+				"SLI_CONFIG_SPECIAL mailbox command\n");
+		return -ENOMEM;
+	}
+
+	/* Post all rpi memory regions to the port. */
+	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+			 sizeof(struct lpfc_sli4_cfg_mhdr),
+			 LPFC_SLI4_MBX_EMBED);
+
+
+	/* Post the physical rpi to the port for this rpi header. */
+	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+	       rpi_page->start_rpi);
+	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+	       hdr_tmpl, rpi_page->page_count);
+
+	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2514 POST_RPI_HDR mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the next available rpi from the
+ * driver's rpi bitmask, consistent with the SLI-4 interface spec. If
+ * the driver is running low on rpi resources, it may also post another
+ * rpi header page to the port.
+ *
+ * Returns
+ * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+	unsigned long rpi;
+	uint16_t max_rpi, rpi_limit;
+	uint16_t rpi_remaining, lrpi = 0;
+	struct lpfc_rpi_hdr *rpi_hdr;
+
+	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+	rpi_limit = phba->sli4_hba.next_rpi;
+
+	/*
+	 * Fetch the next logical rpi.  Because this index is logical,
+	 * the  driver starts at 0 each time.
+	 */
+	spin_lock_irq(&phba->hbalock);
+	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+	if (rpi >= rpi_limit)
+		rpi = LPFC_RPI_ALLOC_ERROR;
+	else {
+		set_bit(rpi, phba->sli4_hba.rpi_bmask);
+		phba->sli4_hba.max_cfg_param.rpi_used++;
+		phba->sli4_hba.rpi_count++;
+	}
+
+	/*
+	 * Don't try to allocate more rpi header regions if the device limit
+	 * has been exhausted.
+	 */
+	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+	    (phba->sli4_hba.rpi_count >= max_rpi)) {
+		spin_unlock_irq(&phba->hbalock);
+		return rpi;
+	}
+
+	/*
+	 * RPI header postings are not required for SLI4 ports capable of
+	 * extents.
+	 */
+	if (!phba->sli4_hba.rpi_hdrs_in_use) {
+		spin_unlock_irq(&phba->hbalock);
+		return rpi;
+	}
+
+	/*
+	 * If the driver is running low on rpi resources, allocate another
+	 * page now.  Note that the next_rpi value is used because
+	 * it represents how many are actually in use whereas max_rpi notes
+	 * how many are supported max by the device.
+	 */
+	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
+	spin_unlock_irq(&phba->hbalock);
+	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+		if (!rpi_hdr) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2002 Error Could not grow rpi "
+					"count\n");
+		} else {
+			lrpi = rpi_hdr->start_rpi;
+			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
+			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+		}
+	}
+
+	return rpi;
+}
+
+/**
+ * __lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver. The caller is expected
+ * to hold the hbalock.
+ **/
+void
+__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
+		phba->sli4_hba.rpi_count--;
+		phba->sli4_hba.max_cfg_param.rpi_used--;
+	}
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: rpi to release.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_sli4_free_rpi(phba, rpi);
+	spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the rpi bitmask and rpi id arrays
+ * maintained by the driver.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+	kfree(phba->sli4_hba.rpi_bmask);
+	kfree(phba->sli4_hba.rpi_ids);
+	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Resume an rpi on the port
+ * @ndlp: pointer to the node whose rpi is to be resumed.
+ * @cmpl: completion handler for the RESUME_RPI mailbox, may be NULL.
+ * @arg: argument passed to the completion handler.
+ *
+ * This routine is invoked to issue a RESUME_RPI mailbox command to the
+ * port for the rpi associated with @ndlp.
+ *
+ * Returns 0 on success, a negative error value otherwise.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
+{
+	LPFC_MBOXQ_t *mboxq;
+	struct lpfc_hba *phba = ndlp->phba;
+	int rc;
+
+	/* Allocate a mailbox for the RESUME_RPI command. */
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+
+	/* Construct the RESUME_RPI mailbox command. */
+	lpfc_resume_rpi(mboxq, ndlp);
+	if (cmpl) {
+		mboxq->mbox_cmpl = cmpl;
+		mboxq->context1 = arg;
+		mboxq->context2 = ndlp;
+	} else
+		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mboxq->vport = ndlp->vport;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2010 Resume RPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @vport: Pointer to the vport for which the vpi is being initialized
+ *
+ * This routine is invoked to activate a vpi with the port.
+ *
+ * Returns:
+ *    0 success
+ *    -Evalue otherwise
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc = 0;
+	int retval = MBX_SUCCESS;
+	uint32_t mbox_tmo;
+	struct lpfc_hba *phba = vport->phba;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq)
+		return -ENOMEM;
+	lpfc_init_vpi(phba, mboxq, vport->vpi);
+	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
+	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+				"2022 INIT VPI Mailbox failed "
+				"status %d, mbxStatus x%x\n", rc,
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		retval = -EIO;
+	}
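+	/* On MBX_TIMEOUT the mailbox is not freed here; it may still complete */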
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+
+	return retval;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is the completion handler for the ADD_FCF_RECORD
+ * nonembedded mailbox command.  It checks the mailbox subheader status
+ * and frees the nonembedded mailbox resources.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	void *virt_addr;
+	union lpfc_sli4_cfg_shdr *shdr;
+	uint32_t shdr_status, shdr_add_status;
+
+	virt_addr = mboxq->sge_array->addr[0];
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+	if ((shdr_status || shdr_add_status) &&
+		(shdr_status != STATUS_FCF_IN_USE))
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2558 ADD_FCF_RECORD mailbox failed with "
+			"status x%x add_status x%x\n",
+			shdr_status, shdr_add_status);
+
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record.  This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+	int rc = 0;
+	LPFC_MBOXQ_t *mboxq;
+	uint8_t *bytep;
+	void *virt_addr;
+	dma_addr_t phys_addr;
+	struct lpfc_mbx_sge sge;
+	uint32_t alloc_len, req_len;
+	uint32_t fcfindex;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
+		return -ENOMEM;
+	}
+
+	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+		  sizeof(uint32_t);
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+				     req_len, LPFC_SLI4_MBX_NEMBED);
+	if (alloc_len < req_len) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2523 Allocated DMA memory size (x%x) is "
+			"less than the requested DMA memory "
+			"size (x%x)\n", alloc_len, req_len);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Get the first SGE entry from the non-embedded DMA memory.  This
+	 * routine only uses a single SGE.
+	 */
+	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+	virt_addr = mboxq->sge_array->addr[0];
+	/*
+	 * Configure the FCF record for FCFI 0.  This is the driver's
+	 * hardcoded default and gets used in nonFIP mode.
+	 */
+	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+	/*
+	 * Copy the fcf_index and the FCF Record Data. The data starts after
+	 * the FCoE header plus word10. The data copy needs to be endian
+	 * correct.
+	 */
+	bytep += sizeof(uint32_t);
+	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2515 ADD_FCF_RECORD mailbox failed with "
+			"status 0x%x\n", rc);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		rc = -EIO;
+	} else
+		rc = 0;
+
+	return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record.  The
+ * values used are hardcoded.  This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+				struct fcf_record *fcf_record,
+				uint16_t fcf_index)
+{
+	memset(fcf_record, 0, sizeof(struct fcf_record));
+	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
+	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+	/* Set the VLAN bit map */
+	if (phba->valid_vlan) {
+		fcf_record->vlan_bitmap[phba->vlan_id / 8]
+			= 1 << (phba->vlan_id % 8);
+	}
+}
+
+/**
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * records and processing them one at a time, starting from the @fcf_index,
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
+	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2000 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_scan;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_scan;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag |= FCF_TS_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else {
+		/* Reset eligible FCF count for new scan */
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+			phba->fcf.eligible_fcf_cnt = 0;
+		error = 0;
+	}
+fail_fcf_scan:
+	if (error) {
+		if (mboxq)
+			lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		/* FCF scan failed, clear FCF_TS_INPROG flag */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		spin_unlock_irq(&phba->hbalock);
+	}
+	return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI roundrobin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2763 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for the FLOGI roundrobin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2758 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask with the next priority
+ * @phba: pointer to the lpfc_hba struct for this port.
+ *
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, starting from the highest
+ * priority to the lowest. The most likely FCF candidate will be in the
+ * highest priority group. When this routine is called it searches the
+ * fcf_pri list for the next lowest priority group and repopulates the
+ * rr_bmask with only those fcf_indexes.
+ *
+ * Returns:
+ * 1=success 0=failure
+ **/
+int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_pri;
+	uint16_t last_index;
+	struct lpfc_fcf_pri *fcf_pri;
+	int rc;
+	int ret = 0;
+
+	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+			LPFC_SLI4_FCF_TBL_INDX_MAX);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"3060 Last IDX %d\n", last_index);
+	if (list_empty(&phba->fcf.fcf_pri_list)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+			"3061 Last IDX %d\n", last_index);
+		return 0; /* Empty rr list */
+	}
+	next_fcf_pri = 0;
+	/*
+	 * Clear the rr_bmask and set all of the bits that are at this
+	 * priority.
+	 */
+	memset(phba->fcf.fcf_rr_bmask, 0,
+			sizeof(*phba->fcf.fcf_rr_bmask));
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+			continue;
+		/*
+		 * The first priority that has not failed FLOGI will be
+		 * the highest.
+		 */
+		if (!next_fcf_pri)
+			next_fcf_pri = fcf_pri->fcf_rec.priority;
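+		/*
+		 * Drop the hbalock while updating the rr_bmask; it is
+		 * re-acquired below before the list walk continues.
+		 */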
+		spin_unlock_irq(&phba->hbalock);
+		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+			rc = lpfc_sli4_fcf_rr_index_set(phba,
+						fcf_pri->fcf_rec.fcf_index);
+			if (rc)
+				return 0;
+		}
+		spin_lock_irq(&phba->hbalock);
+	}
+	/*
+	 * If next_fcf_pri was not set above and the list is not empty, then
+	 * all of the entries have failed FLOGI.  Reset the FLOGI-failed flag
+	 * and start at the beginning.
+	 */
+	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+			/*
+			 * The first priority that has not failed FLOGI
+			 * will be the highest.
+			 */
+			if (!next_fcf_pri)
+				next_fcf_pri = fcf_pri->fcf_rec.priority;
+			spin_unlock_irq(&phba->hbalock);
+			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+				rc = lpfc_sli4_fcf_rr_index_set(phba,
+						fcf_pri->fcf_rec.fcf_index);
+				if (rc)
+					return 0;
+			}
+			spin_lock_irq(&phba->hbalock);
+		}
+	} else
+		ret = 1;
+	spin_unlock_irq(&phba->hbalock);
+
+	return ret;
+}
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals to the
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_index;
+
+	/* Search start from next bit of currently registered FCF index */
+next_priority:
+	next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
+					LPFC_SLI4_FCF_TBL_INDX_MAX;
+	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+				       LPFC_SLI4_FCF_TBL_INDX_MAX,
+				       next_fcf_index);
+
+	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		/*
+		 * If we have wrapped then we need to clear the bits that
+		 * have been tested so that we can detect when we should
+		 * change the priority level.
+		 */
+		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+	}
+
+
+	/* Check roundrobin failover list empty condition */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+		/*
+		 * If the next fcf index is not found, check if there are
+		 * lower priority level FCFs in the fcf_priority list.
+		 * Set up the rr_bmask with all of the available fcf bits
+		 * at that level and continue the selection process.
+		 */
+		if (lpfc_check_next_fcf_pri_level(phba))
+			goto next_priority;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2844 No roundrobin failover FCF available\n");
+		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+			return LPFC_FCOE_FCF_NEXT_NONE;
+		else {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"3063 Only FCF available idx %d, flag %x\n",
+				next_fcf_index,
+			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+			return next_fcf_index;
+		}
+	}
+
+	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+		LPFC_FCF_FLOGI_FAILED)
+		goto next_priority;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2845 Get next roundrobin failover FCF (x%x)\n",
+			next_fcf_index);
+
+	return next_fcf_index;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF record index to set in the bmask.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit successfully set, otherwise, it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2610 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return -EINVAL;
+	}
+	/* Set the eligible FCF record index bmask */
+	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2790 Set FCF (x%x) to roundrobin FCF failover "
+			"bmask\n", fcf_index);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF record index to clear from the bmask.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * roundrobin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	struct lpfc_fcf_pri *fcf_pri;
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2762 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return;
+	}
+	/* Clear the eligible FCF record index bmask */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+			list_del_init(&fcf_pri->list);
+			break;
+		}
+	}
+	spin_unlock_irq(&phba->hbalock);
+	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2791 Clear FCF (x%x) from roundrobin failover "
+			"bmask\n", fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to mailbox object.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	uint32_t shdr_status, shdr_add_status;
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+	shdr_status = bf_get(lpfc_mbox_hdr_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+			     &redisc_fcf->header.cfg_shdr.response);
+	if (shdr_status || shdr_add_status) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2746 Requesting for FCF rediscovery failed "
+				"status x%x add_status x%x\n",
+				shdr_status, shdr_add_status);
+		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * CVL event triggered FCF rediscover request failed,
+			 * last resort to re-try current registered FCF entry.
+			 */
+			lpfc_retry_pport_discovery(phba);
+		} else {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * DEAD FCF event triggered FCF rediscover request
+			 * failed, last resort to fail over as a link down
+			 * to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		}
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2775 Start FCF rediscover quiescent timer\n");
+		/*
+		 * Start FCF rediscovery wait timer for pending FCF
+		 * before rescan FCF record table.
+		 */
+		lpfc_fcf_redisc_wait_start_timer(phba);
+	}
+
+	mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request for rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+	int rc, length;
+
+	/* Cancel retry delay timers to all vports before FCF rediscover */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2745 Failed to allocate mbox for "
+				"requesting FCF rediscover.\n");
+		return -ENOMEM;
+	}
+
+	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+			 length, LPFC_SLI4_MBX_EMBED);
+
+	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+	/* Set count to 0 for invalidating the entire FCF database */
+	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+	/* Issue the mailbox command asynchronously */
+	mbox->vport = phba->pport;
+	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+	if (rc == MBX_NOT_FINISHED) {
+		mempool_free(mbox, phba->mbox_mem_pool);
+		return -EIO;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+	uint32_t link_state;
+
+	/*
+	 * Last resort as FCF DEAD event failover will treat this as
+	 * a link down, but save the link state because we don't want
+	 * it to be changed to Link Down unless it is already down.
+	 */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	/* Unregister FCF if no devices connected to it */
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets the SLI3 port configuration region 23 data through the
+ * memory dump mailbox command. When it successfully retrieves data, the size
+ * of the data is returned; otherwise, 0 is returned.
+ **/
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	uint32_t offset = 0;
+	int rc;
+
+	if (!rgn23_data)
+		return 0;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2600 failed to allocate mailbox memory\n");
+		return 0;
+	}
+	mb = &pmb->u.mb;
+
+	do {
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+					"2601 failed to read config "
+					"region 23, rc 0x%x Status 0x%x\n",
+					rc, mb->mbxStatus);
+			mb->un.varDmp.word_cnt = 0;
+		}
+		/*
+		 * dump mem may return a zero when finished or we got a
+		 * mailbox error, either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
+		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				       rgn23_data + offset,
+				       mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+	mempool_free(pmb, phba->mbox_mem_pool);
+	return offset;
+}
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets the SLI4 port configuration region 23 data through the
+ * memory dump mailbox command. When it successfully retrieves data, the size
+ * of the data is returned; otherwise, 0 is returned.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+	LPFC_MBOXQ_t *mboxq = NULL;
+	struct lpfc_dmabuf *mp = NULL;
+	struct lpfc_mqe *mqe;
+	uint32_t data_length = 0;
+	int rc;
+
+	if (!rgn23_data)
+		return 0;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3105 failed to allocate mailbox memory\n");
+		return 0;
+	}
+
+	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+		goto out;
+	mqe = &mboxq->u.mqe;
+	mp = (struct lpfc_dmabuf *) mboxq->context1;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc)
+		goto out;
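+	/* The length of the returned region 23 data is in mailbox word 5 */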
+	data_length = mqe->un.mb_words[5];
+	if (data_length == 0)
+		goto out;
+	if (data_length > DMP_RGN23_SIZE) {
+		data_length = 0;
+		goto out;
+	}
+	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (mp) {
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+	}
+	return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLVs for the port status
+ * to decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+	uint8_t *rgn23_data = NULL;
+	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+	uint32_t offset = 0;
+
+	/* Get adapter Region 23 data */
+	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+	if (!rgn23_data)
+		goto out;
+
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+	else {
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+			goto out;
+		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+	}
+
+	if (!data_size)
+		goto out;
+
+	/* Check the region signature first */
+	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2619 Config region 23 has bad signature\n");
+		goto out;
+	}
+	offset += 4;
+
+	/* Check the data structure version */
+	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2620 Config region 23 has bad version\n");
+		goto out;
+	}
+	offset += 4;
+
+	/* Parse TLV entries in the region */
+	while (offset < data_size) {
+		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+			break;
+		/*
+		 * If the TLV is not a driver-specific TLV or the driver id is
+		 * not the Linux driver id, skip the record.
+		 */
+		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+		    (rgn23_data[offset + 3] != 0)) {
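+			/* TLV length is in words; add 4 for the TLV header */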
+			offset += rgn23_data[offset + 1] * 4 + 4;
+			continue;
+		}
+
+		/* Driver found a driver specific TLV in the config region */
+		sub_tlv_len = rgn23_data[offset + 1] * 4;
+		offset += 4;
+		tlv_offset = 0;
+
+		/*
+		 * Search for configured port state sub-TLV.
+		 */
+		while ((offset < data_size) &&
+			(tlv_offset < sub_tlv_len)) {
+			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+				offset += 4;
+				tlv_offset += 4;
+				break;
+			}
+			if (rgn23_data[offset] != PORT_STE_TYPE) {
+				offset += rgn23_data[offset + 1] * 4 + 4;
+				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+				continue;
+			}
+
+			/* This HBA contains PORT_STE configured */
+			if (!rgn23_data[offset + 2])
+				phba->hba_flag |= LINK_DISABLED;
+
+			goto out;
+		}
+	}
+
+out:
+	kfree(rgn23_data);
+	return;
+}
+
+/**
+ * lpfc_wr_object - write an object to the firmware
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @dmabuf_list: list of dmabufs to write to the port.
+ * @size: the total byte value of the objects to write to the port.
+ * @offset: the current offset to be used to start the transfer.
+ *
+ * This routine will create a wr_object mailbox command to send to the port.
+ * The mailbox command will be constructed using the dma buffers described in
+ * @dmabuf_list to create a list of BDEs. This routine will fill in as many
+ * BDEs as the embedded mailbox can support. The @offset variable will be
+ * used to indicate the starting offset of the transfer and will also return
+ * the offset after the write object mailbox has completed. @size is used to
+ * determine the end of the object and whether the eof bit should be set.
+ *
+ * Returns 0 if successful, and @offset will contain the new offset to use
+ * for the next write.
+ * Returns a negative value for error cases.
+ **/
+int
+lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+	       uint32_t size, uint32_t *offset)
+{
+	struct lpfc_mbx_wr_object *wr_object;
+	LPFC_MBOXQ_t *mbox;
+	int rc = 0, i = 0;
+	uint32_t shdr_status, shdr_add_status;
+	uint32_t mbox_tmo;
+	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t written = 0;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			LPFC_MBOX_OPCODE_WRITE_OBJECT,
+			sizeof(struct lpfc_mbx_wr_object) -
+			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
+
+	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
+	wr_object->u.request.write_offset = *offset;
+	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
+	wr_object->u.request.object_name[0] =
+		cpu_to_le32(wr_object->u.request.object_name[0]);
+	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
+	list_for_each_entry(dmabuf, dmabuf_list, list) {
+		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
+			break;
+		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
+		wr_object->u.request.bde[i].addrHigh =
+			putPaddrHigh(dmabuf->phys);
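+		/* Last chunk: trim the BDE to the remaining bytes and set eof */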
+		if (written + SLI4_PAGE_SIZE >= size) {
+			wr_object->u.request.bde[i].tus.f.bdeSize =
+				(size - written);
+			written += (size - written);
+			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
+		} else {
+			wr_object->u.request.bde[i].tus.f.bdeSize =
+				SLI4_PAGE_SIZE;
+			written += SLI4_PAGE_SIZE;
+		}
+		i++;
+	}
+	wr_object->u.request.bde_count = i;
+	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else {
+		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+	}
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"3025 Write Object mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	} else
+		*offset += wr_object->u.response.actual_write_length;
+	return rc;
+}
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailboxq and cleans up all REG_LOGIN
+ * and REG_VPI mailbox commands associated with the vport. It is called
+ * when the driver wants to restart discovery of the vport due to a
+ * Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mb, *nextmb;
+	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_nodelist *act_mbx_ndlp = NULL;
+	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+	LIST_HEAD(mbox_cmd_list);
+	uint8_t restart_loop;
+
+	/* Clean up internally queued mailbox commands with the vport */
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+		if (mb->vport != vport)
+			continue;
+
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+			(mb->u.mb.mbxCommand != MBX_REG_VPI))
+			continue;
+
+		list_del(&mb->list);
+		list_add_tail(&mb->list, &mbox_cmd_list);
+	}
+	/* Clean up active mailbox command with the vport */
+	mb = phba->sli.mbox_active;
+	if (mb && (mb->vport == vport)) {
+		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+			(mb->u.mb.mbxCommand == MBX_REG_VPI))
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
+			/* Put reference count for delayed processing */
+			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+			/* Unregister the RPI when mailbox complete */
+			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+		}
+	}
+	/* Cleanup any mailbox completions which are not yet processed */
+	do {
+		restart_loop = 0;
+		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+			/*
+			 * If this mailbox is already processed or it is
+			 * for another vport, ignore it.
+			 */
+			if ((mb->vport != vport) ||
+				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
+				continue;
+
+			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+				(mb->u.mb.mbxCommand != MBX_REG_VPI))
+				continue;
+
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+				ndlp = (struct lpfc_nodelist *)mb->context2;
+				/* Unregister the RPI when mailbox complete */
+				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				restart_loop = 1;
+				spin_unlock_irq(&phba->hbalock);
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				spin_lock_irq(&phba->hbalock);
+				break;
+			}
+		}
+	} while (restart_loop);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Release the cleaned-up mailbox commands */
+	while (!list_empty(&mbox_cmd_list)) {
+		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			mp = (struct lpfc_dmabuf *) (mb->context1);
+			if (mp) {
+				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
+				kfree(mp);
+			}
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			mb->context2 = NULL;
+			if (ndlp) {
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				lpfc_nlp_put(ndlp);
+			}
+		}
+		mempool_free(mb, phba->mbox_mem_pool);
+	}
+
+	/* Release the ndlp with the cleaned-up active mailbox command */
+	if (act_mbx_ndlp) {
+		spin_lock(shost->host_lock);
+		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+		spin_unlock(shost->host_lock);
+		lpfc_nlp_put(act_mbx_ndlp);
+	}
+}
+
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempts to submit the IOCBs on the txq
+ * to the adapter.  For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because there are
+ * no SGLs available.  This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+	LIST_HEAD(completions);
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_iocbq *piocbq = NULL;
+	unsigned long iflags = 0;
+	char *fail_msg = NULL;
+	struct lpfc_sglq *sglq;
+	union lpfc_wqe wqe;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	if (pring->txq_cnt > pring->txq_max)
+		pring->txq_max = pring->txq_cnt;
+
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+
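+	/* Drain until the txq is empty or no SGL can be obtained */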
+	while (pring->txq_cnt) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+
+		piocbq = lpfc_sli_ringtx_get(phba, pring);
+		sglq = __lpfc_sli_get_sglq(phba, piocbq);
+		if (!sglq) {
+			__lpfc_sli_ringtx_put(phba, pring, piocbq);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			break;
+		} else {
+			if (!piocbq) {
+				/* The txq_cnt is out of sync with the txq
+				 * list.  This should never happen.
+				 */
+				sglq = __lpfc_clear_active_sglq(phba,
+						 sglq->sli4_lxritag);
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2823 txq empty and txq_cnt is %d\n ",
+					pring->txq_cnt);
+				break;
+			}
+		}
+
+		/* The xri and iocb resources secured,
+		 * attempt to issue request
+		 */
+		piocbq->sli4_lxritag = sglq->sli4_lxritag;
+		piocbq->sli4_xritag = sglq->sli4_xritag;
+		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+			fail_msg = "to convert bpl to sgl";
+		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+			fail_msg = "to convert iocb to wqe";
+		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+			fail_msg = " - Wq is full";
+		else
+			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+		if (fail_msg) {
+			/* Failed means we can't issue and need to cancel */
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"2822 IOCB failed %s iotag 0x%x "
+					"xri 0x%x\n",
+					fail_msg,
+					piocbq->iotag, piocbq->sli4_xritag);
+			list_add_tail(&piocbq->list, &completions);
+		}
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
+	/* Cancel all the IOCBs that cannot be issued */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+				IOERR_SLI_ABORTED);
+
+	return pring->txq_cnt;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.h
new file mode 100644
index 0000000..3290b8e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli.h
@@ -0,0 +1,304 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* forward declaration for LPFC_IOCB_t's use */
+struct lpfc_hba;
+struct lpfc_vport;
+
+/* Define the context types that SLI handles for abort and sums. */
+typedef enum _lpfc_ctx_cmd {
+	LPFC_CTX_LUN,
+	LPFC_CTX_TGT,
+	LPFC_CTX_HOST
+} lpfc_ctx_cmd;
+
+struct lpfc_cq_event {
+	struct list_head list;
+	union {
+		struct lpfc_mcqe		mcqe_cmpl;
+		struct lpfc_acqe_link		acqe_link;
+		struct lpfc_acqe_fip		acqe_fip;
+		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_acqe_grp5		acqe_grp5;
+		struct lpfc_acqe_fc_la		acqe_fc;
+		struct lpfc_acqe_sli		acqe_sli;
+		struct lpfc_rcqe		rcqe_cmpl;
+		struct sli4_wcqe_xri_aborted	wcqe_axri;
+		struct lpfc_wcqe_complete	wcqe_cmpl;
+	} cqe;
+};
+
+/* This structure is used to handle IOCB requests / responses */
+struct lpfc_iocbq {
+	/* lpfc_iocbqs are used in double linked lists */
+	struct list_head list;
+	struct list_head clist;
+	struct list_head dlist;
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_lxritag;  /* logical pre-assigned XRI. */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct lpfc_cq_event cq_event;
+
+	IOCB_t iocb;		/* IOCB cmd */
+	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
+	uint16_t iocb_flag;
+#define LPFC_IO_LIBDFC		1	/* libdfc iocb */
+#define LPFC_IO_WAKE		2	/* High Priority Queue signal flag */
+#define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
+#define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
+#define LPFC_IO_FABRIC		0x10	/* Iocb send using fabric scheduler */
+#define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
+#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
+#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP		0x100	/* security IO */
+#define LPFC_IO_ON_Q		0x200	/* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF		0x400	/* T10 DIF IO */
+
+#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
+#define LPFC_FIP_ELS_ID_SHIFT	14
+
+	uint8_t rsvd2;
+	uint32_t drvrTimeout;	/* driver timeout in seconds */
+	uint32_t fcp_wqidx;	/* index to FCP work queue */
+	struct lpfc_vport *vport;/* virtual port pointer */
+	void *context1;		/* caller context information */
+	void *context2;		/* caller context information */
+	void *context3;		/* caller context information */
+	union {
+		wait_queue_head_t    *wait_queue;
+		struct lpfc_iocbq    *rsp_iocb;
+		struct lpfcMboxq     *mbox;
+		struct lpfc_nodelist *ndlp;
+		struct lpfc_node_rrq *rrq;
+	} context_un;
+
+	void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
+	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+			   struct lpfc_iocbq *);
+};
+
+#define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */
+
+#define IOCB_SUCCESS        0
+#define IOCB_BUSY           1
+#define IOCB_ERROR          2
+#define IOCB_TIMEDOUT       3
+
+#define LPFC_MBX_WAKE		1
+#define LPFC_MBX_IMED_UNREG	2
+
+typedef struct lpfcMboxq {
+	/* MBOXQs are used in single linked lists */
+	struct list_head list;	/* ptr to next mailbox command */
+	union {
+		MAILBOX_t mb;		/* Mailbox cmd */
+		struct lpfc_mqe mqe;
+	} u;
+	struct lpfc_vport *vport;/* virtual port pointer */
+	void *context1;		/* caller context information */
+	void *context2;		/* caller context information */
+
+	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+	uint8_t mbox_flag;
+	uint16_t in_ext_byte_len;
+	uint16_t out_ext_byte_len;
+	uint8_t  mbox_offset_word;
+	struct lpfc_mcqe mcqe;
+	struct lpfc_mbx_nembed_sge_virt *sge_array;
+} LPFC_MBOXQ_t;
+
+#define MBX_POLL        1	/* poll mailbox till command done, then
+				   return */
+#define MBX_NOWAIT      2	/* issue command then return immediately */
+
+#define LPFC_MAX_RING_MASK  5	/* max num of rctl/type masks allowed per
+				   ring */
+#define LPFC_MAX_RING       4	/* max num of SLI rings used by driver */
+
+struct lpfc_sli_ring;
+
+struct lpfc_sli_ring_mask {
+	uint8_t profile;	/* profile associated with ring */
+	uint8_t rctl;	/* rctl / type pair configured for ring */
+	uint8_t type;	/* rctl / type pair configured for ring */
+	uint8_t rsvd;
+	/* rcv'd unsol event */
+	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
+					 struct lpfc_sli_ring *,
+					 struct lpfc_iocbq *);
+};
+
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_ring_stat {
+	uint64_t iocb_event;	 /* IOCB event counters */
+	uint64_t iocb_cmd;	 /* IOCB cmd issued */
+	uint64_t iocb_rsp;	 /* IOCB rsp received */
+	uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
+	uint64_t iocb_cmd_full;	 /* IOCB cmd ring full */
+	uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
+	uint64_t iocb_rsp_full;	 /* IOCB rsp ring full */
+};
+
+/* Structure used to hold SLI ring information */
+struct lpfc_sli_ring {
+	uint16_t flag;		/* ring flags */
+#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
+#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
+#define LPFC_STOP_IOCB_EVENT     0x020	/* Stop processing IOCB cmds event */
+	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */
+
+	uint32_t local_getidx;   /* last available cmd index (from cmdGetInx) */
+	uint32_t next_cmdidx;    /* next_cmd index */
+	uint32_t rspidx;	/* current index in response ring */
+	uint32_t cmdidx;	/* current index in command ring */
+	uint8_t rsvd;
+	uint8_t ringno;		/* ring number */
+	uint16_t numCiocb;	/* number of command iocb's per ring */
+	uint16_t numRiocb;	/* number of rsp iocb's per ring */
+	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
+	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
+
+	uint32_t fast_iotag;	/* max fastlookup based iotag           */
+	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
+	uint32_t iotag_max;	/* max iotag value to use               */
+	struct list_head txq;
+	uint16_t txq_cnt;	/* current length of queue */
+	uint16_t txq_max;	/* max length */
+	struct list_head txcmplq;
+	uint16_t txcmplq_cnt;	/* current length of queue */
+	uint16_t txcmplq_max;	/* max length */
+	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
+	uint32_t *rspringaddr;	/* virtual address for rsp rings */
+	uint32_t missbufcnt;	/* keep track of buffers to post */
+	struct list_head postbufq;
+	uint16_t postbufq_cnt;	/* current length of queue */
+	uint16_t postbufq_max;	/* max length */
+	struct list_head iocb_continueq;
+	uint16_t iocb_continueq_cnt;	/* current length of queue */
+	uint16_t iocb_continueq_max;	/* max length */
+	struct list_head iocb_continue_saveq;
+
+	struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
+	uint32_t num_mask;	/* number of mask entries in prt array */
+	void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
+		struct lpfc_sli_ring *, struct lpfc_iocbq *);
+
+	struct lpfc_sli_ring_stat stats;	/* SLI statistical info */
+
+	/* cmd ring available */
+	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
+					struct lpfc_sli_ring *);
+};
+
+/* Structure used for configuring rings to a specific profile or rctl / type */
+struct lpfc_hbq_init {
+	uint32_t rn;		/* Receive buffer notification */
+	uint32_t entry_count;	/* max # of entries in HBQ */
+	uint32_t headerLen;	/* 0 if not profile 4 or 5 */
+	uint32_t logEntry;	/* Set to 1 if this HBQ used for LogEntry */
+	uint32_t profile;	/* Selection profile 0=all, 7=logentry */
+	uint32_t ring_mask;	/* Binds HBQ to a ring e.g. Ring0=b0001,
+				 * ring2=b0100 */
+	uint32_t hbq_index;	/* index of this hbq in ring .HBQs[] */
+
+	uint32_t seqlenoff;
+	uint32_t maxlen;
+	uint32_t seqlenbcnt;
+	uint32_t cmdcodeoff;
+	uint32_t cmdmatch[8];
+	uint32_t mask_count;	/* number of mask entries in prt array */
+	struct hbq_mask hbqMasks[6];
+
+	/* Non-config rings fields to keep track of buffer allocations */
+	uint32_t buffer_count;	/* number of buffers allocated */
+	uint32_t init_count;	/* number to allocate when initialized */
+	uint32_t add_count;	/* number to allocate when starved */
+} ;
+
+/* Structure used to hold SLI statistical counters and info */
+struct lpfc_sli_stat {
+	uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
+	uint64_t mbox_cmd;       /* Mailbox commands issued */
+	uint64_t sli_intr;       /* Count of Host Attention interrupts */
+	uint32_t err_attn_event; /* Error Attn event counters */
+	uint32_t link_event;     /* Link event counters */
+	uint32_t mbox_event;     /* Mailbox event counters */
+	uint32_t mbox_busy;	 /* Mailbox cmd busy */
+};
+
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+	uint32_t link_failure_count;
+	uint32_t loss_of_sync_count;
+	uint32_t loss_of_signal_count;
+	uint32_t prim_seq_protocol_err_count;
+	uint32_t invalid_tx_word_count;
+	uint32_t invalid_crc_count;
+	uint32_t error_frames;
+	uint32_t link_events;
+};
+
+/* Structure used to hold SLI information */
+struct lpfc_sli {
+	uint32_t num_rings;
+	uint32_t sli_flag;
+
+	/* Additional sli_flags */
+#define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
+#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
+#define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
+#define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT          0x1000 /* needed for Menlo fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
+
+	struct lpfc_sli_ring ring[LPFC_MAX_RING];
+	int fcp_ring;		/* ring used for FCP initiator commands */
+	int next_ring;
+
+	int extra_ring;		/* extra ring used for other protocols */
+
+	struct lpfc_sli_stat slistat;	/* SLI statistical info */
+	struct list_head mboxq;
+	uint16_t mboxq_cnt;	/* current length of queue */
+	uint16_t mboxq_max;	/* max length */
+	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */
+	struct list_head mboxq_cmpl;
+
+	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
+					   cmd */
+
+#define LPFC_IOCBQ_LOOKUP_INCREMENT  1024
+	struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
+	size_t iocbq_lookup_len;           /* current length of the array */
+	uint16_t  last_iotag;              /* last allocated IOTAG */
+	unsigned long  stats_start;        /* in seconds */
+	struct lpfc_lnk_stat lnk_stat_offsets;
+};
+
+/* Timeout for normal outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO				30
+/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_TMO		60
+/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
+#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
+/* Timeout for other flash-based outstanding mbox command (Seconds) */
+#define LPFC_MBOX_TMO_FLASH_CMD			300
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli4.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644
index 0000000..c19d139
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_sli4.h
@@ -0,0 +1,663 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO		10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1   		10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
+#define LPFC_RPI_LOW_WATER_MARK			10
+
+#define LPFC_UNREG_FCF                          1
+#define LPFC_SKIP_UNREG_FCF                     0
+
+/* Amount of time in seconds for waiting FCF rediscovery to complete */
+#define LPFC_FCF_REDISCOVER_WAIT_TMO		2000 /* msec */
+
+/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT		254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX       8
+#define LPFC_SP_EQN_DEF       1
+#define LPFC_FP_EQN_DEF       4
+#define LPFC_FP_EQN_MIN       1
+#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX       32
+#define LPFC_SP_WQN_DEF       1
+#define LPFC_FP_WQN_DEF       4
+#define LPFC_FP_WQN_MIN       1
+#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there is no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX	0
+#define LPFC_FCOE_FCF_GET_FIRST	0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE	0xFFFF
+
+#define LPFC_FCOE_NULL_VID	0xFFF
+#define LPFC_FCOE_IGNORE_VID	0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3	0xFF
+#define LPFC_FCOE_FCF_MAC4	0xFF
+#define LPFC_FCOE_FCF_MAC5	0xFE
+#define LPFC_FCOE_FCF_MAP0	0x0E
+#define LPFC_FCOE_FCF_MAP1	0xFC
+#define LPFC_FCOE_FCF_MAP2	0x00
+#define LPFC_FCOE_MAX_RCV_SIZE	0x800
+#define LPFC_FCOE_FKA_ADV_PER	0
+#define LPFC_FCOE_FIP_PRIORITY	0x80
+
+#define sli4_sid_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_s_id[0] << 16 | \
+	 (fc_hdr)->fh_s_id[1] <<  8 | \
+	 (fc_hdr)->fh_s_id[2])
+
+#define sli4_fctl_from_fc_hdr(fc_hdr)  \
+	((fc_hdr)->fh_f_ctl[0] << 16 | \
+	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
+	 (fc_hdr)->fh_f_ctl[2])
+
+#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+
+enum lpfc_sli4_queue_type {
+	LPFC_EQ,
+	LPFC_GCQ,
+	LPFC_MCQ,
+	LPFC_WCQ,
+	LPFC_RCQ,
+	LPFC_MQ,
+	LPFC_WQ,
+	LPFC_HRQ,
+	LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+	LPFC_NONE,
+	LPFC_MBOX,
+	LPFC_FCP,
+	LPFC_ELS,
+	LPFC_USOL
+};
+
+union sli4_qe {
+	void *address;
+	struct lpfc_eqe *eqe;
+	struct lpfc_cqe *cqe;
+	struct lpfc_mcqe *mcqe;
+	struct lpfc_wcqe_complete *wcqe_complete;
+	struct lpfc_wcqe_release *wcqe_release;
+	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+	struct lpfc_rcqe_complete *rcqe_complete;
+	struct lpfc_mqe *mqe;
+	union  lpfc_wqe *wqe;
+	struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+	struct list_head list;
+	enum lpfc_sli4_queue_type type;
+	enum lpfc_sli4_queue_subtype subtype;
+	struct lpfc_hba *phba;
+	struct list_head child_list;
+	uint32_t entry_count;	/* Number of entries to support on the queue */
+	uint32_t entry_size;	/* Size of each queue entry. */
+	uint32_t entry_repost;	/* Count of entries before doorbell is rung */
+#define LPFC_QUEUE_MIN_REPOST	8
+	uint32_t queue_id;	/* Queue ID assigned by the hardware */
+	uint32_t assoc_qid;     /* Queue ID associated with, for CQ/WQ/MQ */
+	struct list_head page_list;
+	uint32_t page_count;	/* Number of pages allocated for this queue */
+	uint32_t host_index;	/* The host's index for putting or getting */
+	uint32_t hba_index;	/* The last known hba index for get or put */
+	union sli4_qe qe[1];	/* array to index entries (must be last) */
+};
+
+struct lpfc_sli4_link {
+	uint8_t speed;
+	uint8_t duplex;
+	uint8_t status;
+	uint8_t type;
+	uint8_t number;
+	uint8_t fault;
+	uint16_t logical_speed;
+	uint16_t topology;
+};
+
+struct lpfc_fcf_rec {
+	uint8_t  fabric_name[8];
+	uint8_t  switch_name[8];
+	uint8_t  mac_addr[6];
+	uint16_t fcf_indx;
+	uint32_t priority;
+	uint16_t vlan_id;
+	uint32_t addr_mode;
+	uint32_t flag;
+#define BOOT_ENABLE	0x01
+#define RECORD_VALID	0x02
+};
+
+struct lpfc_fcf_pri_rec {
+	uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+	uint16_t flag;
+	uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+	struct list_head list;
+	struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index; used for driver-internal bookkeeping.  It
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX	32
+
+struct lpfc_fcf {
+	uint16_t fcfi;
+	uint32_t fcf_flag;
+#define FCF_AVAILABLE	0x01 /* FCF available for discovery */
+#define FCF_REGISTERED	0x02 /* FCF registered with FW */
+#define FCF_SCAN_DONE	0x04 /* FCF table scan done */
+#define FCF_IN_USE	0x08 /* At least one discovery completed */
+#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+	uint32_t addr_mode;
+	uint32_t eligible_fcf_cnt;
+	struct lpfc_fcf_rec current_rec;
+	struct lpfc_fcf_rec failover_rec;
+	struct list_head fcf_pri_list;
+	struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+	uint32_t current_fcf_scan_pri;
+	struct timer_list redisc_wait;
+	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
+};
+
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION	1
+#define LPFC_REGION23_LAST_REC  0xff
+#define DRIVER_SPECIFIC_TYPE	0xA2
+#define LINUX_DRIVER_ID		0x20
+#define PORT_STE_TYPE		0x1
+
+struct lpfc_fip_param_hdr {
+	uint8_t type;
+#define FCOE_PARAM_TYPE		0xA0
+	uint8_t length;
+#define FCOE_PARAM_LENGTH	2
+	uint8_t parm_version;
+#define FIPP_VERSION		0x01
+	uint8_t parm_flags;
+#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
+#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
+#define	FIPP_MODE_ON				0x1
+#define	FIPP_MODE_OFF				0x0
+#define FIPP_VLAN_VALID				0x1
+};
+
+struct lpfc_fcoe_params {
+	uint8_t fc_map[3];
+	uint8_t reserved1;
+	uint16_t vlan_tag;
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+	uint8_t type;
+#define FCOE_CONN_TBL_TYPE		0xA1
+	uint8_t length;   /* words */
+	uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+	uint16_t flags;
+#define	FCFCNCT_VALID		0x0001
+#define	FCFCNCT_BOOT		0x0002
+#define	FCFCNCT_PRIMARY		0x0004   /* if not set, Secondary */
+#define	FCFCNCT_FBNM_VALID	0x0008
+#define	FCFCNCT_SWNM_VALID	0x0010
+#define	FCFCNCT_VLAN_VALID	0x0020
+#define	FCFCNCT_AM_VALID	0x0040
+#define	FCFCNCT_AM_PREFERRED	0x0080   /* if not set, AM Required */
+#define	FCFCNCT_AM_SPMA		0x0100	 /* if not set, FPMA */
+
+	uint16_t vlan_tag;
+	uint8_t fabric_name[8];
+	uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+	struct list_head list;
+	struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox.  This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+	struct lpfc_dmabuf *dmabuf;
+	struct dma_address dma_address;
+	void *avirt;
+	dma_addr_t aphys;
+	uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
+
+#define LPFC_EQE_SIZE_4B 	4
+#define LPFC_EQE_SIZE_16B	16
+#define LPFC_CQE_SIZE		16
+#define LPFC_WQE_SIZE		64
+#define LPFC_MQE_SIZE		256
+#define LPFC_RQE_SIZE		8
+
+#define LPFC_EQE_DEF_COUNT	1024
+#define LPFC_CQE_DEF_COUNT      1024
+#define LPFC_WQE_DEF_COUNT      256
+#define LPFC_MQE_DEF_COUNT      16
+#define LPFC_RQE_DEF_COUNT	512
+
+#define LPFC_QUEUE_NOARM	false
+#define LPFC_QUEUE_REARM	true
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE	0x10000
+#define LPFC_SLI4_FL1_MAX_BUF_SIZE	0x2000
+#define LPFC_SLI4_MIN_BUF_SIZE		0x400
+#define LPFC_SLI4_MAX_BUF_SIZE		0x20000
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+	uint16_t max_xri;
+	uint16_t xri_base;
+	uint16_t xri_used;
+	uint16_t max_rpi;
+	uint16_t rpi_base;
+	uint16_t rpi_used;
+	uint16_t max_vpi;
+	uint16_t vpi_base;
+	uint16_t vpi_used;
+	uint16_t max_vfi;
+	uint16_t vfi_base;
+	uint16_t vfi_used;
+	uint16_t max_fcfi;
+	uint16_t fcfi_used;
+	uint16_t max_eq;
+	uint16_t max_rq;
+	uint16_t max_cq;
+	uint16_t max_wq;
+};
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+	uint32_t idx;
+	struct lpfc_hba *phba;
+};
+
+/* Port Capabilities for SLI4 Parameters */
+struct lpfc_pc_sli4_params {
+	uint32_t supported;
+	uint32_t if_type;
+	uint32_t sli_rev;
+	uint32_t sli_family;
+	uint32_t featurelevel_1;
+	uint32_t featurelevel_2;
+	uint32_t proto_types;
+#define LPFC_SLI4_PROTO_FCOE	0x0000001
+#define LPFC_SLI4_PROTO_FC	0x0000002
+#define LPFC_SLI4_PROTO_NIC	0x0000004
+#define LPFC_SLI4_PROTO_ISCSI	0x0000008
+#define LPFC_SLI4_PROTO_RDMA	0x0000010
+	uint32_t sge_supp_len;
+	uint32_t if_page_sz;
+	uint32_t rq_db_window;
+	uint32_t loopbk_scope;
+	uint32_t eq_pages_max;
+	uint32_t eqe_size;
+	uint32_t cq_pages_max;
+	uint32_t cqe_size;
+	uint32_t mq_pages_max;
+	uint32_t mqe_size;
+	uint32_t mq_elem_cnt;
+	uint32_t wq_pages_max;
+	uint32_t wqe_size;
+	uint32_t rq_pages_max;
+	uint32_t rqe_size;
+	uint32_t hdr_pages_max;
+	uint32_t hdr_size;
+	uint32_t hdr_pp_align;
+	uint32_t sgl_pages_max;
+	uint32_t sgl_pp_align;
+	uint8_t cqv;
+	uint8_t mqv;
+	uint8_t wqv;
+	uint8_t rqv;
+};
+
+struct lpfc_iov {
+	uint32_t pf_number;
+	uint32_t vf_number;
+};
+
+struct lpfc_sli4_lnk_info {
+	uint8_t lnk_dv;
+#define LPFC_LNK_DAT_INVAL	0
+#define LPFC_LNK_DAT_VAL	1
+	uint8_t lnk_tp;
+#define LPFC_LNK_GE	0x0 /* FCoE */
+#define LPFC_LNK_FC	0x1 /* FC   */
+	uint8_t lnk_no;
+};
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR0, config space registers */
+	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR1, control registers */
+	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+					     PCI BAR2, doorbell registers */
+	union {
+		struct {
+			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
+			void __iomem *UERRLOregaddr;
+			void __iomem *UERRHIregaddr;
+			void __iomem *UEMASKLOregaddr;
+			void __iomem *UEMASKHIregaddr;
+		} if_type0;
+		struct {
+			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
+			void __iomem *STATUSregaddr;
+			void __iomem *CTRLregaddr;
+			void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
+			void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART		0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
+		} if_type2;
+	} u;
+
+	/* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
+	void __iomem *PSMPHRregaddr;
+
+	/* Well-known SLI INTF register memory map. */
+	void __iomem *SLIINTFregaddr;
+
+	/* IF type 0, BAR 1 function CSR register memory map */
+	void __iomem *ISRregaddr;	/* HST_ISR register */
+	void __iomem *IMRregaddr;	/* HST_IMR register */
+	void __iomem *ISCRregaddr;	/* HST_ISCR register */
+	/* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
+	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;	/* BootStrap MBX register */
+
+	uint32_t ue_mask_lo;
+	uint32_t ue_mask_hi;
+	struct lpfc_register sli_intf;
+	struct lpfc_pc_sli4_params pc_sli4_params;
+	struct msix_entry *msix_entries;
+	uint32_t cfg_eqn;
+	uint32_t msix_vec_nr;
+	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+	/* Pointers to the constructed SLI4 queues */
+	struct lpfc_queue **fp_eq; /* Fast-path event queue */
+	struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+	struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+	struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+
+	/* Setup information for various queue parameters */
+	int eq_esize;
+	int eq_ecount;
+	int cq_esize;
+	int cq_ecount;
+	int wq_esize;
+	int wq_ecount;
+	int mq_esize;
+	int mq_ecount;
+	int rq_esize;
+	int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC         10000
+#define LPFC_FP_EQ_MAX_INTR_SEC         10000
+
+	uint32_t intr_enable;
+	struct lpfc_bmbx bmbx;
+	struct lpfc_max_cfg_param max_cfg_param;
+	uint16_t extents_in_use; /* must allocate resource extents. */
+	uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
+	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+	uint16_t next_rpi;
+	uint16_t scsi_xri_max;
+	uint16_t scsi_xri_cnt;
+	uint16_t scsi_xri_start;
+	struct list_head lpfc_free_sgl_list;
+	struct list_head lpfc_sgl_list;
+	struct lpfc_sglq **lpfc_els_sgl_array;
+	struct list_head lpfc_abts_els_sgl_list;
+	struct lpfc_scsi_buf **lpfc_scsi_psb_array;
+	struct list_head lpfc_abts_scsi_buf_list;
+	uint32_t total_sglq_bufs;
+	struct lpfc_sglq **lpfc_sglq_active_list;
+	struct list_head lpfc_rpi_hdr_list;
+	unsigned long *rpi_bmask;
+	uint16_t *rpi_ids;
+	uint16_t rpi_count;
+	struct list_head lpfc_rpi_blk_list;
+	unsigned long *xri_bmask;
+	uint16_t *xri_ids;
+	uint16_t xri_count;
+	struct list_head lpfc_xri_blk_list;
+	unsigned long *vfi_bmask;
+	uint16_t *vfi_ids;
+	uint16_t vfi_count;
+	struct list_head lpfc_vfi_blk_list;
+	struct lpfc_sli4_flags sli4_flags;
+	struct list_head sp_queue_event;
+	struct list_head sp_cqe_event_pool;
+	struct list_head sp_asynce_work_queue;
+	struct list_head sp_fcp_xri_aborted_work_queue;
+	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_unsol_work_queue;
+	struct lpfc_sli4_link link_state;
+	struct lpfc_sli4_lnk_info lnk_info;
+	uint32_t pport_name_sta;
+#define LPFC_SLI4_PPNAME_NON	0
+#define LPFC_SLI4_PPNAME_GET	1
+	struct lpfc_iov iov;
+	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+	spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+};
+
+enum lpfc_sge_type {
+	GEN_BUFF_TYPE,
+	SCSI_BUFF_TYPE
+};
+
+enum lpfc_sgl_state {
+	SGL_FREED,
+	SGL_ALLOCATED,
+	SGL_XRI_ABORTED
+};
+
+struct lpfc_sglq {
+	/* lpfc_sglqs are used in doubly linked lists */
+	struct list_head list;
+	struct list_head clist;
+	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	enum lpfc_sgl_state state;
+	struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
+	uint16_t iotag;         /* pre-assigned IO tag */
+	uint16_t sli4_lxritag;  /* logical pre-assigned xri. */
+	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+	struct sli4_sge *sgl;	/* pre-assigned SGL */
+	void *virt;		/* virtual address. */
+	dma_addr_t phys;	/* physical address */
+};
+
+struct lpfc_rpi_hdr {
+	struct list_head list;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf;
+	uint32_t page_count;
+	uint32_t start_rpi;
+};
+
+struct lpfc_rsrc_blks {
+	struct list_head list;
+	uint16_t rsrc_start;
+	uint16_t rsrc_size;
+	uint16_t rsrc_used;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+		     uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+			   struct lpfc_mbx_sge *);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+			       uint16_t);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+			uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+		       struct lpfc_queue *, uint32_t);
+uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, uint32_t);
+uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
+uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+			 struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+int lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *, struct list_head *,
+				    int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_vport *);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_version.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_version.h
new file mode 100644
index 0000000..25cefc2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_version.h
@@ -0,0 +1,28 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_DRIVER_VERSION "8.3.30"
+#define LPFC_DRIVER_NAME		"lpfc"
+#define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
+#define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
+
+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
+		LPFC_DRIVER_VERSION
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex.  All rights reserved."
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.c b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 0000000..0fe188e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,877 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+
+inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+				 enum fc_vport_state new_state)
+{
+	struct fc_vport *fc_vport = vport->fc_vport;
+
+	if (fc_vport) {
+		/*
+		 * When the transport defines fc_vport_set_state we will
+		 * replace this code with the following line
+		 */
+		/* fc_vport_set_state(fc_vport, new_state); */
+		if (new_state != FC_VPORT_INITIALIZING)
+			fc_vport->vport_last_state = fc_vport->vport_state;
+		fc_vport->vport_state = new_state;
+	}
+
+	/* for all the error states we will set the internal state to FAILED */
+	switch (new_state) {
+	case FC_VPORT_NO_FABRIC_SUPP:
+	case FC_VPORT_NO_FABRIC_RSCS:
+	case FC_VPORT_FABRIC_LOGOUT:
+	case FC_VPORT_FABRIC_REJ_WWN:
+	case FC_VPORT_FAILED:
+		vport->port_state = LPFC_VPORT_FAILED;
+		break;
+	case FC_VPORT_LINKDOWN:
+		vport->port_state = LPFC_VPORT_UNKNOWN;
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+}
+
+static int
+lpfc_alloc_vpi(struct lpfc_hba *phba)
+{
+	unsigned long vpi;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Start at bit 1 because vpi zero is reserved for the physical port */
+	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
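+	/*
+	 * find_next_zero_bit returns a value beyond max_vpi when the bitmap
+	 * is exhausted; report that as 0 (no VPI available), since VPI 0 is
+	 * reserved for the physical port and is never handed out here.
+	 */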
+	if (vpi > phba->max_vpi)
+		vpi = 0;
+	else
+		set_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used++;
+	spin_unlock_irq(&phba->hbalock);
+	return vpi;
+}
+
+static void
+lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
+{
+	if (vpi == 0)
+		return;
+	spin_lock_irq(&phba->hbalock);
+	clear_bit(vpi, phba->vpi_bmask);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		phba->sli4_hba.max_cfg_param.vpi_used--;
+	spin_unlock_irq(&phba->hbalock);
+}
+
+static int
+lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *pmb;
+	MAILBOX_t *mb;
+	struct lpfc_dmabuf *mp;
+	int  rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		return -ENOMEM;
+	}
+	mb = &pmb->u.mb;
+
+	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Grab buffer pointer and clear context1 so we can use
+	 * lpfc_sli_issue_mbox_wait
+	 */
+	mp = (struct lpfc_dmabuf *) pmb->context1;
+	pmb->context1 = NULL;
+
+	pmb->vport = vport;
+	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
+	if (rc != MBX_SUCCESS) {
+		if (signal_pending(current)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+					 "1830 Signal aborted mbxCmd x%x\n",
+					 mb->mbxCommand);
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			if (rc != MBX_TIMEOUT)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EINTR;
+		} else {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
+					 "1818 VPort failed init, mbxCmd x%x "
+					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
+					 mb->mbxCommand, mb->mbxStatus, rc);
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+			if (rc != MBX_TIMEOUT)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	}
+
+	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+	       sizeof (struct lpfc_name));
+	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+	       sizeof (struct lpfc_name));
+
+	lpfc_mbuf_free(phba, mp->virt, mp->phys);
+	kfree(mp);
+	mempool_free(pmb, phba->mbox_mem_pool);
+
+	return 0;
+}
+
+static int
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
+		      const char *name_type)
+{
+	/*
+	 * Ensure that IEEE format 1 addresses contain zeros in bits 59-48.
+	 */
+	if (!((wwn->u.wwn[0] >> 4) == 1 &&
+	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
+		return 1;
+
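+	/*
+	 * Reaching this point means the NAA nibble is 1 (IEEE format 1) but
+	 * bits that must be zero are set, so the WWN is logged and rejected.
+	 */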
+	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
+			"%02x:%02x:%02x:%02x\n",
+			name_type,
+			wwn->u.wwn[0], wwn->u.wwn[1],
+			wwn->u.wwn[2], wwn->u.wwn[3],
+			wwn->u.wwn[4], wwn->u.wwn[5],
+			wwn->u.wwn[6], wwn->u.wwn[7]);
+	return 0;
+}
+
+static int
+lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
+{
+	struct lpfc_vport *vport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	list_for_each_entry(vport, &phba->port_list, listentry) {
+		if (vport == new_vport)
+			continue;
+		/* If they match, return not unique */
+		if (memcmp(&vport->fc_sparam.portName,
+			   &new_vport->fc_sparam.portName,
+			   sizeof(struct lpfc_name)) == 0) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return 1;
+}
+
+/**
+ * lpfc_discovery_wait - Wait for driver discovery to quiesce
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * The driver calls this routine from lpfc_vport_delete to enforce a
+ * synchronous execution of vport delete relative to discovery
+ * activities.  The lpfc_vport_delete routine should not return until
+ * it can reasonably guarantee that discovery has quiesced.  Post FDISC
+ * LOGO, the driver must wait until its SAN teardown is complete and
+ * all resources are recovered before allowing cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	uint32_t wait_flags = 0;
+	unsigned long wait_time_max;
+	unsigned long start_time;
+
+	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+	/*
+	 * The time constraint on this loop is a balance between the
+	 * fabric RA_TOV value and dev_loss tmo.  The driver's devloss_tmo
+	 * is 10 seconds, giving this loop a 3x multiplier minimally.
+	 */
+	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
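+	/* e.g. if fc_ratov were 10 seconds, the wait is bounded at 33 seconds */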
+	wait_time_max += jiffies;
+	start_time = jiffies;
+	while (time_before(jiffies, wait_time_max)) {
+		if ((vport->num_disc_nodes > 0)    ||
+		    (vport->fc_flag & wait_flags)  ||
+		    ((vport->port_state > LPFC_VPORT_FAILED) &&
+		     (vport->port_state < LPFC_VPORT_READY))) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+					"1833 Vport discovery quiesce Wait:"
+					" state x%x fc_flags x%x"
+					" num_nodes x%x, waiting 1000 msecs"
+					" total wait msecs x%x\n",
+					vport->port_state, vport->fc_flag,
+					vport->num_disc_nodes,
+					jiffies_to_msecs(jiffies - start_time));
+			msleep(1000);
+		} else {
+			/* Base case.  Wait variants satisfied.  Break out */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+					 "1834 Vport discovery quiesced:"
+					 " state x%x fc_flags x%x"
+					 " wait msecs x%x\n",
+					 vport->port_state, vport->fc_flag,
+					 jiffies_to_msecs(jiffies
+						- start_time));
+			break;
+		}
+	}
+
+	if (time_after(jiffies, wait_time_max))
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				"1835 Vport discovery quiesce failed:"
+				" state x%x fc_flags x%x wait msecs x%x\n",
+				vport->port_state, vport->fc_flag,
+				jiffies_to_msecs(jiffies - start_time));
+}
+
+int
+lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost = fc_vport->shost;
+	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
+	struct lpfc_hba   *phba = pport->phba;
+	struct lpfc_vport *vport = NULL;
+	int instance;
+	int vpi;
+	int rc = VPORT_ERROR;
+	int status;
+
+	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1808 Create VPORT failed: "
+				"NPIV is not enabled: SLImode:%d\n",
+				phba->sli_rev);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	vpi = lpfc_alloc_vpi(phba);
+	if (vpi == 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1809 Create VPORT failed: "
+				"Max VPORTs (%d) exceeded\n",
+				phba->max_vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	/* Assign an unused board number */
+	if ((instance = lpfc_get_instance()) < 0) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1810 Create VPORT failed: Cannot get "
+				"instance number\n");
+		lpfc_free_vpi(phba, vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
+	if (!vport) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+				"1811 Create VPORT failed: vpi x%x\n", vpi);
+		lpfc_free_vpi(phba, vpi);
+		rc = VPORT_NORESOURCES;
+		goto error_out;
+	}
+
+	vport->vpi = vpi;
+	lpfc_debugfs_initialize(vport);
+
+	if ((status = lpfc_vport_sparm(phba, vport))) {
+		if (status == -EINTR) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1831 Create VPORT Interrupted.\n");
+			rc = VPORT_ERROR;
+		} else {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+					 "1813 Create VPORT failed. "
+					 "Cannot get sparam\n");
+			rc = VPORT_NORESOURCES;
+		}
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		goto error_out;
+	}
+
+	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+
+	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
+	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
+
+	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
+	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1821 Create VPORT failed. "
+				 "Invalid WWN format\n");
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	if (!lpfc_unique_wwpn(phba, vport)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1823 Create VPORT failed. "
+				 "Duplicate WWN on HBA\n");
+		lpfc_free_vpi(phba, vpi);
+		destroy_port(vport);
+		rc = VPORT_INVAL;
+		goto error_out;
+	}
+
+	/* Create binary sysfs attribute for vport */
+	lpfc_alloc_sysfs_attr(vport);
+
+	*(struct lpfc_vport **)fc_vport->dd_data = vport;
+	vport->fc_vport = fc_vport;
+
+	/*
+	 * In SLI4, the vpi must be activated before it can be used
+	 * by the port.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+	    (pport->fc_flag & FC_VFI_REGISTERED)) {
+		rc = lpfc_sli4_init_vpi(vport);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+					"1838 Failed to INIT_VPI on vpi %d "
+					"status %d\n", vpi, rc);
+			rc = VPORT_NORESOURCES;
+			lpfc_free_vpi(phba, vpi);
+			goto error_out;
+		}
+	} else if (phba->sli_rev == LPFC_SLI_REV4) {
+		/*
+		 * The driver cannot INIT_VPI now.  Set the flag so INIT_VPI
+		 * is issued when REG_VFI completes.
+		 */
+		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	if ((phba->link_state < LPFC_LINK_UP) ||
+	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	if (disable) {
+		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+		rc = VPORT_OK;
+		goto out;
+	}
+
+	/* Use the physical port's Fabric NDLP to determine if the link is
+	 * up and ready to FDISC.
+	 */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+			lpfc_set_disctmo(vport);
+			lpfc_initial_fdisc(vport);
+		} else {
+			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0262 No NPIV Fabric support\n");
+		}
+	} else {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	}
+	rc = VPORT_OK;
+
+out:
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			"1825 Vport Created.\n");
+	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
+error_out:
+	return rc;
+}
+
+static int
+disable_vport(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+	long timeout;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && phba->link_state >= LPFC_LINK_UP) {
+		vport->unreg_vpi_cmpl = VPORT_INVAL;
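+		/* fc_ratov is in seconds, so this waits up to 2 * R_A_TOV */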
+		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+				timeout = schedule_timeout(timeout);
+	}
+
+	lpfc_sli_host_down(vport);
+
+	/* Mark all nodes for discovery so we can remove them by
+	 * calling lpfc_cleanup_rpis(vport, 1)
+	 */
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+			continue;
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					NLP_EVT_DEVICE_RECOVERY);
+	}
+	lpfc_cleanup_rpis(vport, 1);
+
+	lpfc_stop_vport_timers(vport);
+	lpfc_unreg_all_rpis(vport);
+	lpfc_unreg_default_rpis(vport);
+	/*
+	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+	 * scsi_host_put() to release the vport.
+	 */
+	lpfc_mbx_unreg_vpi(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1826 Vport Disabled.\n");
+	return VPORT_OK;
+}
+
+static int
+enable_vport(struct fc_vport *fc_vport)
+{
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct lpfc_hba   *phba = vport->phba;
+	struct lpfc_nodelist *ndlp = NULL;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if ((phba->link_state < LPFC_LINK_UP) ||
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		return VPORT_OK;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	vport->load_flag |= FC_LOADING;
+	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+	spin_unlock_irq(shost->host_lock);
+
+	/* Use the physical port's Fabric NDLP to determine if the link is
+	 * up and ready to FDISC.
+	 */
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
+	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+			lpfc_set_disctmo(vport);
+			lpfc_initial_fdisc(vport);
+		} else {
+			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+					 "0264 No NPIV Fabric support\n");
+		}
+	} else {
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+	}
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1827 Vport Enabled.\n");
+	return VPORT_OK;
+}
+
+int
+lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+	if (disable)
+		return disable_vport(fc_vport);
+	else
+		return enable_vport(fc_vport);
+}
+
+
+int
+lpfc_vport_delete(struct fc_vport *fc_vport)
+{
+	struct lpfc_nodelist *ndlp = NULL;
+	struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
+	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+	struct lpfc_hba   *phba = vport->phba;
+	long timeout;
+
+	if (vport->port_type == LPFC_PHYSICAL_PORT) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1812 vport_delete failed: Cannot delete "
+				 "physical host\n");
+		return VPORT_ERROR;
+	}
+
+	/* If the vport is a static vport fail the deletion. */
+	if ((vport->vport_flag & STATIC_VPORT) &&
+		!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+				 "1837 vport_delete failed: Cannot delete "
+				 "static vport.\n");
+		return VPORT_ERROR;
+	}
+	spin_lock_irq(&phba->hbalock);
+	vport->load_flag |= FC_UNLOADING;
+	spin_unlock_irq(&phba->hbalock);
+	/*
+	 * If we are not unloading the driver then prevent the vport_delete
+	 * from happening until after this vport's discovery is finished.
+	 */
+	if (!(phba->pport->load_flag & FC_UNLOADING)) {
+		int check_count = 0;
+		while (check_count < ((phba->fc_ratov * 3) + 3) &&
+		       vport->port_state > LPFC_VPORT_FAILED &&
+		       vport->port_state < LPFC_VPORT_READY) {
+			check_count++;
+			msleep(1000);
+		}
+		if (vport->port_state > LPFC_VPORT_FAILED &&
+		    vport->port_state < LPFC_VPORT_READY)
+			return -EAGAIN;
+	}
+	/*
+	 * This is a bit of a mess.  We want to ensure the shost doesn't get
+	 * torn down until we're done with the embedded lpfc_vport structure.
+	 *
+	 * Beyond holding a reference for this function, we also need a
+	 * reference for outstanding I/O requests we schedule during delete
+	 * processing.  But once we scsi_remove_host() we can no longer obtain
+	 * a reference through scsi_host_get().
+	 *
+	 * So we take two references here.  We release one reference at the
+	 * bottom of the function -- after delinking the vport.  And we
+	 * release the other at the completion of the unreg_vpi that gets
+	 * initiated after we've disposed of all other resources associated
+	 * with the port.
+	 */
+	if (!scsi_host_get(shost))
+		return VPORT_INVAL;
+	if (!scsi_host_get(shost)) {
+		scsi_host_put(shost);
+		return VPORT_INVAL;
+	}
+	lpfc_free_sysfs_attr(vport);
+
+	lpfc_debugfs_terminate(vport);
+
+	/* Remove FC host and then SCSI host with the vport */
+	fc_remove_host(lpfc_shost_from_vport(vport));
+	scsi_remove_host(lpfc_shost_from_vport(vport));
+
+	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+
+	/* In case of driver unload, do not perform the fabric logo: the
+	 * worker thread has already stopped at this stage, so the logo
+	 * can be safely skipped.
+	 */
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+		    phba->link_state >= LPFC_LINK_UP) {
+			/* First look for the Fabric ndlp */
+			ndlp = lpfc_findnode_did(vport, Fabric_DID);
+			if (!ndlp)
+				goto skip_logo;
+			else if (!NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp = lpfc_enable_node(vport, ndlp,
+							NLP_STE_UNUSED_NODE);
+				if (!ndlp)
+					goto skip_logo;
+			}
+			/* Remove ndlp from vport node list */
+			lpfc_dequeue_node(vport, ndlp);
+
+			/* Indicate free memory when release */
+			spin_lock_irq(&phba->ndlp_lock);
+			NLP_SET_FREE_REQ(ndlp);
+			spin_unlock_irq(&phba->ndlp_lock);
+			/* Kick off release ndlp when it can be safely done */
+			lpfc_nlp_put(ndlp);
+		}
+		goto skip_logo;
+	}
+
+	/* Otherwise, we will perform fabric logo as needed */
+	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+	    phba->link_state >= LPFC_LINK_UP &&
+	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+		if (vport->cfg_enable_da_id) {
+			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
+				while (vport->ct_flags && timeout)
+					timeout = schedule_timeout(timeout);
+			else
+				lpfc_printf_log(vport->phba, KERN_WARNING,
+						LOG_VPORT,
+						"1829 CT command failed to "
+						"delete objects on fabric\n");
+		}
+		/* First look for the Fabric ndlp */
+		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		if (!ndlp) {
+			/* Cannot find existing Fabric ndlp, allocate one */
+			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+			if (!ndlp)
+				goto skip_logo;
+			lpfc_nlp_init(vport, ndlp, Fabric_DID);
+			/* Indicate free memory when release */
+			NLP_SET_FREE_REQ(ndlp);
+		} else {
+			if (!NLP_CHK_NODE_ACT(ndlp)) {
+				ndlp = lpfc_enable_node(vport, ndlp,
+						NLP_STE_UNUSED_NODE);
+				if (!ndlp)
+					goto skip_logo;
+			}
+
+			/* Remove ndlp from vport list */
+			lpfc_dequeue_node(vport, ndlp);
+			spin_lock_irq(&phba->ndlp_lock);
+			if (!NLP_CHK_FREE_REQ(ndlp))
+				/* Indicate free memory when release */
+				NLP_SET_FREE_REQ(ndlp);
+			else {
+				/* Skip this if ndlp is already in free mode */
+				spin_unlock_irq(&phba->ndlp_lock);
+				goto skip_logo;
+			}
+			spin_unlock_irq(&phba->ndlp_lock);
+		}
+
+		/*
+		 * If the vpi is not registered, then a valid FDISC doesn't
+		 * exist and there is no need for an ELS LOGO.  Just clean up
+		 * the ndlp.
+		 */
+		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
+			lpfc_nlp_put(ndlp);
+			goto skip_logo;
+		}
+
+		vport->unreg_vpi_cmpl = VPORT_INVAL;
+		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+				timeout = schedule_timeout(timeout);
+	}
+
+	if (!(phba->pport->load_flag & FC_UNLOADING))
+		lpfc_discovery_wait(vport);
+
+skip_logo:
+	lpfc_cleanup(vport);
+	lpfc_sli_host_down(vport);
+
+	lpfc_stop_vport_timers(vport);
+
+	if (!(phba->pport->load_flag & FC_UNLOADING)) {
+		lpfc_unreg_all_rpis(vport);
+		lpfc_unreg_default_rpis(vport);
+		/*
+		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
+		 * does the scsi_host_put() to release the vport.
+		 */
+		if (lpfc_mbx_unreg_vpi(vport))
+			scsi_host_put(shost);
+	} else
+		scsi_host_put(shost);
+
+	lpfc_free_vpi(phba, vport->vpi);
+	vport->work_port_events = 0;
+	spin_lock_irq(&phba->hbalock);
+	list_del_init(&vport->listentry);
+	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+			 "1828 Vport Deleted.\n");
+	scsi_host_put(shost);
+	return VPORT_OK;
+}
+
+struct lpfc_vport **
+lpfc_create_vport_work_array(struct lpfc_hba *phba)
+{
+	struct lpfc_vport *port_iterator;
+	struct lpfc_vport **vports;
+	int index = 0;
+	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
+			 GFP_KERNEL);
+	if (vports == NULL)
+		return NULL;
+	spin_lock_irq(&phba->hbalock);
+	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+		if (port_iterator->load_flag & FC_UNLOADING)
+			continue;
+		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
+			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
+					 "1801 Create vport work array FAILED: "
+					 "cannot do scsi_host_get\n");
+			continue;
+		}
+		vports[index++] = port_iterator;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	return vports;
+}
+
+void
+lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
+{
+	int i;
+	if (vports == NULL)
+		return;
+	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+		scsi_host_put(lpfc_shost_from_vport(vports[i]));
+	kfree(vports);
+}
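+
+/*
+ * Typical usage of the work-array pair above (illustrative sketch only;
+ * do_something_with() stands in for whatever per-vport work the caller does):
+ *
+ *	struct lpfc_vport **vports;
+ *	int i;
+ *
+ *	vports = lpfc_create_vport_work_array(phba);
+ *	if (vports != NULL)
+ *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ *			do_something_with(vports[i]);
+ *	lpfc_destroy_vport_work_array(phba, vports);
+ */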
+
+
+/**
+ * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
+ * @vport: Pointer to vport object.
+ *
+ * This function resets the statistical data for the vport.  It is
+ * called with the host_lock held.
+ **/
+void
+lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->lat_data)
+			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
+				sizeof(struct lpfc_scsicmd_bkt));
+	}
+}
+
+
+/**
+ * lpfc_alloc_bucket - Allocate data buffer required for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * This function allocates data buffer required for all the FC
+ * nodes of the vport to collect statistical data.
+ **/
+void
+lpfc_alloc_bucket(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+
+		kfree(ndlp->lat_data);
+		ndlp->lat_data = NULL;
+
+		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+					 sizeof(struct lpfc_scsicmd_bkt),
+					 GFP_ATOMIC);
+
+			if (!ndlp->lat_data)
+				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+					"0287 lpfc_alloc_bucket failed to "
+					"allocate statistical data buffer DID "
+					"0x%x\n", ndlp->nlp_DID);
+		}
+	}
+}
+
+/**
+ * lpfc_free_bucket - Free data buffer required for statistical data
+ * @vport: Pointer to vport object.
+ *
+ * This function frees the statistical data buffers of all the FC
+ * nodes of the vport.
+ **/
+void
+lpfc_free_bucket(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+
+		kfree(ndlp->lat_data);
+		ndlp->lat_data = NULL;
+	}
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.h b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 0000000..9082834
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,119 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#ifndef _H_LPFC_VPORT
+#define _H_LPFC_VPORT
+
+/* API version values (each will be an individual bit) */
+#define VPORT_API_VERSION_1	0x01
+
+/* Values returned via lpfc_vport_getinfo() */
+struct vport_info {
+
+	uint32_t api_versions;
+	uint8_t linktype;
+#define  VPORT_TYPE_PHYSICAL	0
+#define  VPORT_TYPE_VIRTUAL	1
+
+	uint8_t state;
+#define  VPORT_STATE_OFFLINE	0
+#define  VPORT_STATE_ACTIVE	1
+#define  VPORT_STATE_FAILED	2
+
+	uint8_t fail_reason;
+	uint8_t prev_fail_reason;
+#define  VPORT_FAIL_UNKNOWN	0
+#define  VPORT_FAIL_LINKDOWN	1
+#define  VPORT_FAIL_FAB_UNSUPPORTED	2
+#define  VPORT_FAIL_FAB_NORESOURCES	3
+#define  VPORT_FAIL_FAB_LOGOUT	4
+#define  VPORT_FAIL_ADAP_NORESOURCES	5
+
+	uint8_t node_name[8];	/* WWNN */
+	uint8_t port_name[8];	/* WWPN */
+
+	struct Scsi_Host *shost;
+
+/* Following values are valid only on physical links */
+	uint32_t vports_max;
+	uint32_t vports_inuse;
+	uint32_t rpi_max;
+	uint32_t rpi_inuse;
+#define  VPORT_CNT_INVALID	0xFFFFFFFF
+};
+
+/* data used in link creation */
+struct vport_data {
+	uint32_t api_version;
+
+	uint32_t options;
+#define  VPORT_OPT_AUTORETRY	0x01
+
+	uint8_t node_name[8];	/* WWNN */
+	uint8_t port_name[8];	/* WWPN */
+
+/*
+ *  Upon successful creation, vport_shost will point to the new Scsi_Host
+ *  structure for the new virtual link.
+ */
+	struct Scsi_Host *vport_shost;
+};
+
+/* API function return codes */
+#define VPORT_OK	0
+#define VPORT_ERROR	-1
+#define VPORT_INVAL	-2
+#define VPORT_NOMEM	-3
+#define VPORT_NORESOURCES	-4
+
+int lpfc_vport_create(struct fc_vport *, bool);
+int lpfc_vport_delete(struct fc_vport *);
+int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
+int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
+struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
+void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+
+/*
+ *  queuecommand VPORT-specific return code, specified in the host byte
+ *  of the SCSI result.  Returned when the virtual link has failed or
+ *  is not active.
+ */
+#define  DID_VPORT_ERROR	0x0f
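+/*
+ * Illustrative (hypothetical) use in a queuecommand error path:
+ *	cmnd->result = DID_VPORT_ERROR << 16;
+ */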
+
+#define VPORT_INFO	0x1
+#define VPORT_CREATE	0x2
+#define VPORT_DELETE	0x4
+
+struct vport_cmd_tag {
+	uint32_t cmd;
+	struct vport_data cdata;
+	struct vport_info cinfo;
+	void *vport;
+	int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+			  enum fc_vport_state new_state);
+
+void lpfc_vport_reset_stat_data(struct lpfc_vport *);
+void lpfc_alloc_bucket(struct lpfc_vport *);
+void lpfc_free_bucket(struct lpfc_vport *);
+
+#endif /* _H_LPFC_VPORT */