[Feature] Add MT2731_MP2_MR2_SVN388 baseline version

Change-Id: Ief04314834b31e27effab435d3ca8ba33b499059
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/Kconfig.megaraid b/src/kernel/linux/v4.14/drivers/scsi/megaraid/Kconfig.megaraid
new file mode 100644
index 0000000..17419e3
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/Kconfig.megaraid
@@ -0,0 +1,85 @@
+config MEGARAID_NEWGEN
+	bool "LSI Logic New Generation RAID Device Drivers"
+	depends on PCI && SCSI
+	help
+	LSI Logic RAID Device Drivers
+
+config MEGARAID_MM
+	tristate "LSI Logic Management Module (New Driver)"
+	depends on PCI && SCSI && MEGARAID_NEWGEN
+	help
+	Management Module provides ioctl, sysfs support for LSI Logic
+	RAID controllers.
+	To compile this driver as a module, choose M here: the
+	module will be called megaraid_mm
+
+
+config MEGARAID_MAILBOX
+	tristate "LSI Logic MegaRAID Driver (New Driver)"
+	depends on PCI && SCSI && MEGARAID_MM
+	help
+	List of supported controllers
+
+	OEM	Product Name		VID :DID :SVID:SSID
+	---	------------		---- ---- ---- ----
+	Dell PERC3/QC			101E:1960:1028:0471
+	Dell PERC3/DC			101E:1960:1028:0493
+	Dell PERC3/SC			101E:1960:1028:0475
+	Dell PERC3/Di			1028:000E:1028:0123
+	Dell PERC4/SC			1000:1960:1028:0520
+	Dell PERC4/DC			1000:1960:1028:0518
+	Dell PERC4/QC			1000:0407:1028:0531
+	Dell PERC4/Di			1028:000F:1028:014A
+	Dell PERC 4e/Si			1028:0013:1028:016c
+	Dell PERC 4e/Di			1028:0013:1028:016d
+	Dell PERC 4e/Di			1028:0013:1028:016e
+	Dell PERC 4e/Di			1028:0013:1028:016f
+	Dell PERC 4e/Di			1028:0013:1028:0170
+	Dell PERC 4e/DC			1000:0408:1028:0002
+	Dell PERC 4e/SC			1000:0408:1028:0001
+	LSI MegaRAID SCSI 320-0		1000:1960:1000:A520
+	LSI MegaRAID SCSI 320-1		1000:1960:1000:0520
+	LSI MegaRAID SCSI 320-2		1000:1960:1000:0518
+	LSI MegaRAID SCSI 320-0X	1000:0407:1000:0530
+	LSI MegaRAID SCSI 320-2X	1000:0407:1000:0532
+	LSI MegaRAID SCSI 320-4X	1000:0407:1000:0531
+	LSI MegaRAID SCSI 320-1E	1000:0408:1000:0001
+	LSI MegaRAID SCSI 320-2E	1000:0408:1000:0002
+	LSI MegaRAID SATA 150-4		1000:1960:1000:4523
+	LSI MegaRAID SATA 150-6		1000:1960:1000:0523
+	LSI MegaRAID SATA 300-4X	1000:0409:1000:3004
+	LSI MegaRAID SATA 300-8X	1000:0409:1000:3008
+	INTEL RAID Controller SRCU42X	1000:0407:8086:0532
+	INTEL RAID Controller SRCS16	1000:1960:8086:0523
+	INTEL RAID Controller SRCU42E	1000:0408:8086:0002
+	INTEL RAID Controller SRCZCRX	1000:0407:8086:0530
+	INTEL RAID Controller SRCS28X	1000:0409:8086:3008
+	INTEL RAID Controller SROMBU42E	1000:0408:8086:3431
+	INTEL RAID Controller SROMBU42E	1000:0408:8086:3499
+	INTEL RAID Controller SRCU51L	1000:1960:8086:0520
+	FSC MegaRAID PCI Express ROMB	1000:0408:1734:1065
+	ACER MegaRAID ROMB-2E		1000:0408:1025:004D
+	NEC MegaRAID PCI Express ROMB	1000:0408:1033:8287
+
+	To compile this driver as a module, choose M here: the
+	module will be called megaraid_mbox
+
+config MEGARAID_LEGACY
+	tristate "LSI Logic Legacy MegaRAID Driver"
+	depends on PCI && SCSI
+	help
+	This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
+	and 467 SCSI host adapters. This driver also supports all U320
+	RAID controllers.
+
+	To compile this driver as a module, choose M here: the
+	module will be called megaraid
+
+config MEGARAID_SAS
+	tristate "LSI Logic MegaRAID SAS RAID Module"
+	depends on PCI && SCSI
+	help
+	Module for LSI Logic's SAS based RAID controllers.
+	To compile this driver as a module, choose M here: the
+	module will be called megaraid_sas.
+
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/Makefile b/src/kernel/linux/v4.14/drivers/scsi/megaraid/Makefile
new file mode 100644
index 0000000..6e74d21
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MEGARAID_MM)	+= megaraid_mm.o
+obj-$(CONFIG_MEGARAID_MAILBOX)	+= megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS)	+= megaraid_sas.o
+megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
+	megaraid_sas_fp.o
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/mbox_defs.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/mbox_defs.h
new file mode 100644
index 0000000..e01c6f7
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/mbox_defs.h
@@ -0,0 +1,790 @@
+/*
+ *
+ *			Linux MegaRAID Unified device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: mbox_defs.h
+ *
+ */
+#ifndef _MRAID_MBOX_DEFS_H_
+#define _MRAID_MBOX_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Commands and states for mailbox based controllers
+ */
+
+#define MBOXCMD_LREAD		0x01
+#define MBOXCMD_LWRITE		0x02
+#define MBOXCMD_PASSTHRU	0x03
+#define MBOXCMD_ADPEXTINQ	0x04
+#define MBOXCMD_ADAPTERINQ	0x05
+#define MBOXCMD_LREAD64		0xA7
+#define MBOXCMD_LWRITE64	0xA8
+#define MBOXCMD_PASSTHRU64	0xC3
+#define MBOXCMD_EXTPTHRU	0xE3
+
+#define MAIN_MISC_OPCODE	0xA4
+#define GET_MAX_SG_SUPPORT	0x01
+#define SUPPORT_EXT_CDB		0x16
+
+#define FC_NEW_CONFIG		0xA1
+#define NC_SUBOP_PRODUCT_INFO	0x0E
+#define NC_SUBOP_ENQUIRY3	0x0F
+#define ENQ3_GET_SOLICITED_FULL	0x02
+#define OP_DCMD_READ_CONFIG	0x04
+#define NEW_READ_CONFIG_8LD	0x67
+#define READ_CONFIG_8LD		0x07
+#define FLUSH_ADAPTER		0x0A
+#define FLUSH_SYSTEM		0xFE
+
+/*
+ * Command for random deletion of logical drives
+ */
+#define	FC_DEL_LOGDRV		0xA4
+#define	OP_SUP_DEL_LOGDRV	0x2A
+#define OP_GET_LDID_MAP		0x18
+#define OP_DEL_LOGDRV		0x1C
+
+/*
+ * BIOS commands
+ */
+#define IS_BIOS_ENABLED		0x62
+#define GET_BIOS		0x01
+#define CHNL_CLASS		0xA9
+#define GET_CHNL_CLASS		0x00
+#define SET_CHNL_CLASS		0x01
+#define CH_RAID			0x01
+#define CH_SCSI			0x00
+#define BIOS_PVT_DATA		0x40
+#define GET_BIOS_PVT_DATA	0x00
+
+
+/*
+ * Commands to support clustering
+ */
+#define GET_TARGET_ID		0x7D
+#define CLUSTER_OP		0x70
+#define GET_CLUSTER_MODE	0x02
+#define CLUSTER_CMD		0x6E
+#define RESERVE_LD		0x01
+#define RELEASE_LD		0x02
+#define RESET_RESERVATIONS	0x03
+#define RESERVATION_STATUS	0x04
+#define RESERVE_PD		0x05
+#define RELEASE_PD		0x06
+
+
+/*
+ * Module battery status
+ */
+#define BATTERY_MODULE_MISSING		0x01
+#define BATTERY_LOW_VOLTAGE		0x02
+#define BATTERY_TEMP_HIGH		0x04
+#define BATTERY_PACK_MISSING		0x08
+#define BATTERY_CHARGE_MASK		0x30
+#define BATTERY_CHARGE_DONE		0x00
+#define BATTERY_CHARGE_INPROG		0x10
+#define BATTERY_CHARGE_FAIL		0x20
+#define BATTERY_CYCLES_EXCEEDED		0x40
+
+/*
+ * Physical drive states.
+ */
+#define PDRV_UNCNF	0
+#define PDRV_ONLINE	3
+#define PDRV_FAILED	4
+#define PDRV_RBLD	5
+#define PDRV_HOTSPARE	6
+
+
+/*
+ * Raid logical drive states.
+ */
+#define RDRV_OFFLINE	0
+#define RDRV_DEGRADED	1
+#define RDRV_OPTIMAL	2
+#define RDRV_DELETED	3
+
+/*
+ * Read, write and cache policies
+ */
+#define NO_READ_AHEAD		0
+#define READ_AHEAD		1
+#define ADAP_READ_AHEAD		2
+#define WRMODE_WRITE_THRU	0
+#define WRMODE_WRITE_BACK	1
+#define CACHED_IO		0
+#define DIRECT_IO		1
+
+#define MAX_LOGICAL_DRIVES_8LD		8
+#define MAX_LOGICAL_DRIVES_40LD		40
+#define FC_MAX_PHYSICAL_DEVICES		256
+#define MAX_MBOX_CHANNELS		5
+#define MAX_MBOX_TARGET			15
+#define MBOX_MAX_PHYSICAL_DRIVES	(MAX_MBOX_CHANNELS * MAX_MBOX_TARGET)
+#define MAX_ROW_SIZE_40LD		32
+#define MAX_ROW_SIZE_8LD		8
+#define SPAN_DEPTH_8_SPANS		8
+#define SPAN_DEPTH_4_SPANS		4
+#define MAX_REQ_SENSE_LEN		0x20
+
+
+
+/**
+ * struct mbox_t - Driver and f/w handshake structure.
+ * @cmd		: firmware command
+ * @cmdid	: command id
+ * @numsectors	: number of sectors to be transferred
+ * @lba		: Logical Block Address on LD
+ * @xferaddr	: DMA address for data transfer
+ * @logdrv	: logical drive number
+ * @numsge	: number of scatter gather elements in sg list
+ * @resvd	: reserved
+ * @busy	: f/w busy, must wait to issue more commands.
+ * @numstatus	: number of commands completed.
+ * @status	: status of the commands completed
+ * @completed	: array of completed command ids.
+ * @poll	: poll and ack sequence
+ * @ack		: poll and ack sequence
+ *
+ * The central handshake structure between the driver and the firmware. This
+ * structure must be allocated by the driver and aligned at 8-byte boundary.
+ */
+#define MBOX_MAX_FIRMWARE_STATUS	46
+typedef struct {
+	uint8_t		cmd;
+	uint8_t		cmdid;
+	uint16_t	numsectors;
+	uint32_t	lba;
+	uint32_t	xferaddr;
+	uint8_t		logdrv;
+	uint8_t		numsge;
+	uint8_t		resvd;
+	uint8_t		busy;
+	uint8_t		numstatus;
+	uint8_t		status;
+	uint8_t		completed[MBOX_MAX_FIRMWARE_STATUS];
+	uint8_t		poll;
+	uint8_t		ack;
+} __attribute__ ((packed)) mbox_t;
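+
+/*
+ * Illustrative sketch (not part of the original sources): how a caller
+ * might fill the handshake structure above for a logical-drive read. The
+ * helper name, command id and DMA handle are assumptions; a real issue
+ * path also drives the busy/poll/ack protocol described above.
+ */
+#if 0	/* example only */
+static void example_fill_mbox(mbox_t *mbox, uint8_t cmdid, uint32_t lba,
+		uint16_t nsectors, uint32_t sgl_dma, uint8_t nsge)
+{
+	memset(mbox, 0, sizeof(*mbox));	/* logdrv 0, no flags */
+	mbox->cmd	 = MBOXCMD_LREAD;	/* logical drive read */
+	mbox->cmdid	 = cmdid;	/* echoed back in completed[] */
+	mbox->numsectors = nsectors;
+	mbox->lba	 = lba;
+	mbox->xferaddr	 = sgl_dma;	/* DMA address of the sg list */
+	mbox->numsge	 = nsge;	/* 0 for a plain buffer */
+	mbox->busy	 = 1;		/* firmware owns the mailbox now */
+}
+#endif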
+
+
+/**
+ * mbox64_t - 64-bit extension for the mailbox
+ * @xferaddr_lo	: the low 32-bits of the address of the scatter-gather list
+ * @xferaddr_hi	: the upper 32-bits of the address of the scatter-gather list
+ * @mbox32	: 32-bit mailbox, whose xferaddr field must be set to
+ *		0xFFFFFFFF
+ *
+ * This is the extension of the 32-bit mailbox to be able to perform DMA
+ * beyond the 4GB address range.
+ */
+typedef struct {
+	uint32_t	xferaddr_lo;
+	uint32_t	xferaddr_hi;
+	mbox_t		mbox32;
+} __attribute__ ((packed)) mbox64_t;
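+
+/*
+ * A minimal sketch of the convention described above (the helper name is
+ * an assumption): the 32-bit xferaddr is pinned to 0xFFFFFFFF and the
+ * real scatter-gather list address is carried in the lo/hi pair.
+ */
+#if 0	/* example only */
+static void example_fill_mbox64(mbox64_t *mbox64, uint64_t sgl_dma)
+{
+	mbox64->xferaddr_lo	= (uint32_t)(sgl_dma & 0xFFFFFFFFULL);
+	mbox64->xferaddr_hi	= (uint32_t)(sgl_dma >> 32);
+	mbox64->mbox32.xferaddr	= 0xFFFFFFFF;	/* marks a 64-bit transfer */
+}
+#endif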
+
+/*
+ * mailbox structure used for internal commands
+ */
+typedef struct {
+	u8	cmd;
+	u8	cmdid;
+	u8	opcode;
+	u8	subopcode;
+	u32	lba;
+	u32	xferaddr;
+	u8	logdrv;
+	u8	rsvd[3];
+	u8	numstatus;
+	u8	status;
+} __attribute__ ((packed)) int_mbox_t;
+
+/**
+ * mraid_passthru_t - passthru structure to issue commands to physical devices
+ * @timeout		: command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars			: set if ARS required after check condition
+ * @islogical		: set if command meant for logical devices
+ * @logdrv		: logical drive number if command for LD
+ * @channel		: Channel on which physical device is located
+ * @target		: SCSI target of the device
+ * @queuetag		: unused
+ * @queueaction		: unused
+ * @cdb			: SCSI CDB
+ * @cdblen		: length of the CDB
+ * @reqsenselen		: amount of request sense data to be returned
+ * @reqsensearea	: Sense information buffer
+ * @numsge		: number of scatter-gather elements in the sg list
+ * @scsistatus		: SCSI status of the command completed.
+ * @dataxferaddr	: DMA data transfer address
+ * @dataxferlen		: amount of the data to be transferred.
+ */
+typedef struct {
+	uint8_t		timeout		:3;
+	uint8_t		ars		:1;
+	uint8_t		reserved	:3;
+	uint8_t		islogical	:1;
+	uint8_t		logdrv;
+	uint8_t		channel;
+	uint8_t		target;
+	uint8_t		queuetag;
+	uint8_t		queueaction;
+	uint8_t		cdb[10];
+	uint8_t		cdblen;
+	uint8_t		reqsenselen;
+	uint8_t		reqsensearea[MAX_REQ_SENSE_LEN];
+	uint8_t		numsge;
+	uint8_t		scsistatus;
+	uint32_t	dataxferaddr;
+	uint32_t	dataxferlen;
+} __attribute__ ((packed)) mraid_passthru_t;
+
+typedef struct {
+
+	uint32_t		dataxferaddr_lo;
+	uint32_t		dataxferaddr_hi;
+	mraid_passthru_t	pthru32;
+
+} __attribute__ ((packed)) mega_passthru64_t;
+
+/**
+ * mraid_epassthru_t - passthru structure to issue commands to physical devices
+ * @timeout		: command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
+ * @ars			: set if ARS required after check condition
+ * @rsvd1		: reserved field
+ * @cd_rom		: (?)
+ * @rsvd2		: reserved field
+ * @islogical		: set if command meant for logical devices
+ * @logdrv		: logical drive number if command for LD
+ * @channel		: Channel on which physical device is located
+ * @target		: SCSI target of the device
+ * @queuetag		: unused
+ * @queueaction		: unused
+ * @cdblen		: length of the CDB
+ * @rsvd3		: reserved field
+ * @cdb			: SCSI CDB
+ * @numsge		: number of scatter-gather elements in the sg list
+ * @status		: SCSI status of the command completed.
+ * @reqsenselen		: amount of request sense data to be returned
+ * @reqsensearea	: Sense information buffer
+ * @rsvd4		: reserved field
+ * @dataxferaddr	: DMA data transfer address
+ * @dataxferlen		: amount of the data to be transferred.
+ */
+typedef struct {
+	uint8_t		timeout		:3;
+	uint8_t		ars		:1;
+	uint8_t		rsvd1		:1;
+	uint8_t		cd_rom		:1;
+	uint8_t		rsvd2		:1;
+	uint8_t		islogical	:1;
+	uint8_t		logdrv;
+	uint8_t		channel;
+	uint8_t		target;
+	uint8_t		queuetag;
+	uint8_t		queueaction;
+	uint8_t		cdblen;
+	uint8_t		rsvd3;
+	uint8_t		cdb[16];
+	uint8_t		numsge;
+	uint8_t		status;
+	uint8_t		reqsenselen;
+	uint8_t		reqsensearea[MAX_REQ_SENSE_LEN];
+	uint8_t		rsvd4;
+	uint32_t	dataxferaddr;
+	uint32_t	dataxferlen;
+} __attribute__ ((packed)) mraid_epassthru_t;
+
+
+/**
+ * mraid_pinfo_t - product info, static information about the controller
+ * @data_size		: current size in bytes (not including resvd)
+ * @config_signature	: Current value is 0x00282008
+ * @fw_version		: Firmware version
+ * @bios_version	: version of the BIOS
+ * @product_name	: Name given to the controller
+ * @max_commands	: Maximum concurrent commands supported
+ * @nchannels		: Number of SCSI Channels detected
+ * @fc_loop_present	: Number of Fibre Loops detected
+ * @mem_type		: EDO, FPM, SDRAM etc
+ * @signature		:
+ * @dram_size		: In terms of MB
+ * @subsysid		: device PCI subsystem ID
+ * @subsysvid		: device PCI subsystem vendor ID
+ * @notify_counters	:
+ * @pad1k		: 135 + 889 resvd = 1024 total size
+ *
+ * This structures holds the information about the controller which is not
+ * expected to change dynamically.
+ *
+ * The current value of config signature is 0x00282008:
+ * 0x28 = MAX_LOGICAL_DRIVES,
+ * 0x20 = Number of stripes and
+ * 0x08 = Number of spans
+ */
+typedef struct {
+	uint32_t	data_size;
+	uint32_t	config_signature;
+	uint8_t		fw_version[16];
+	uint8_t		bios_version[16];
+	uint8_t		product_name[80];
+	uint8_t		max_commands;
+	uint8_t		nchannels;
+	uint8_t		fc_loop_present;
+	uint8_t		mem_type;
+	uint32_t	signature;
+	uint16_t	dram_size;
+	uint16_t	subsysid;
+	uint16_t	subsysvid;
+	uint8_t		notify_counters;
+	uint8_t		pad1k[889];
+} __attribute__ ((packed)) mraid_pinfo_t;
+
+
+/**
+ * mraid_notify_t - the notification structure
+ * @global_counter		: Any change increments this counter
+ * @param_counter		: Indicates any params changed
+ * @param_id			: Param modified - defined below
+ * @param_val			: New val of last param modified
+ * @write_config_counter	: write config occurred
+ * @write_config_rsvd		:
+ * @ldrv_op_counter		: Indicates ldrv op started/completed
+ * @ldrv_opid			: ldrv num
+ * @ldrv_opcmd			: ldrv operation - defined below
+ * @ldrv_opstatus		: status of the operation
+ * @ldrv_state_counter		: Indicates change of ldrv state
+ * @ldrv_state_id		: ldrv num
+ * @ldrv_state_new		: New state
+ * @ldrv_state_old		: old state
+ * @pdrv_state_counter		: Indicates change of ldrv state
+ * @pdrv_state_id		: pdrv id
+ * @pdrv_state_new		: New state
+ * @pdrv_state_old		: old state
+ * @pdrv_fmt_counter		: Indicates pdrv format started/over
+ * @pdrv_fmt_id			: pdrv id
+ * @pdrv_fmt_val		: format started/over
+ * @pdrv_fmt_rsvd		:
+ * @targ_xfer_counter		: Indicates SCSI-2 Xfer rate change
+ * @targ_xfer_id		: pdrv Id
+ * @targ_xfer_val		: new Xfer params of last pdrv
+ * @targ_xfer_rsvd		:
+ * @fcloop_id_chg_counter	: Indicates loopid changed
+ * @fcloopid_pdrvid		: pdrv id
+ * @fcloop_id0			: loopid on fc loop 0
+ * @fcloop_id1			: loopid on fc loop 1
+ * @fcloop_state_counter	: Indicates loop state changed
+ * @fcloop_state0		: state of fc loop 0
+ * @fcloop_state1		: state of fc loop 1
+ * @fcloop_state_rsvd		:
+ */
+typedef struct {
+	uint32_t	global_counter;
+	uint8_t		param_counter;
+	uint8_t		param_id;
+	uint16_t	param_val;
+	uint8_t		write_config_counter;
+	uint8_t		write_config_rsvd[3];
+	uint8_t		ldrv_op_counter;
+	uint8_t		ldrv_opid;
+	uint8_t		ldrv_opcmd;
+	uint8_t		ldrv_opstatus;
+	uint8_t		ldrv_state_counter;
+	uint8_t		ldrv_state_id;
+	uint8_t		ldrv_state_new;
+	uint8_t		ldrv_state_old;
+	uint8_t		pdrv_state_counter;
+	uint8_t		pdrv_state_id;
+	uint8_t		pdrv_state_new;
+	uint8_t		pdrv_state_old;
+	uint8_t		pdrv_fmt_counter;
+	uint8_t		pdrv_fmt_id;
+	uint8_t		pdrv_fmt_val;
+	uint8_t		pdrv_fmt_rsvd;
+	uint8_t		targ_xfer_counter;
+	uint8_t		targ_xfer_id;
+	uint8_t		targ_xfer_val;
+	uint8_t		targ_xfer_rsvd;
+	uint8_t		fcloop_id_chg_counter;
+	uint8_t		fcloopid_pdrvid;
+	uint8_t		fcloop_id0;
+	uint8_t		fcloop_id1;
+	uint8_t		fcloop_state_counter;
+	uint8_t		fcloop_state0;
+	uint8_t		fcloop_state1;
+	uint8_t		fcloop_state_rsvd;
+} __attribute__ ((packed)) mraid_notify_t;
+
+
+/**
+ * mraid_inquiry3_t - enquiry for device information
+ *
+ * @data_size		: current size in bytes (not including resvd)
+ * @notify		:
+ * @notify_rsvd		:
+ * @rebuild_rate	: rebuild rate (0% - 100%)
+ * @cache_flush_int	: cache flush interval in seconds
+ * @sense_alert		:
+ * @drive_insert_count	: drive insertion count
+ * @battery_status	:
+ * @num_ldrv		: no. of Log Drives configured
+ * @recon_state		: state of reconstruct
+ * @ldrv_op_status	: logdrv Status
+ * @ldrv_size		: size of each log drv
+ * @ldrv_prop		:
+ * @ldrv_state		: state of log drives
+ * @pdrv_state		: state of phys drvs.
+ * @pdrv_format		:
+ * @targ_xfer		: phys device transfer rate
+ * @pad1k		: 761 + 263 reserved = 1024 bytes total size
+ */
+#define MAX_NOTIFY_SIZE		0x80
+#define CUR_NOTIFY_SIZE		sizeof(mraid_notify_t)
+
+typedef struct {
+	uint32_t	data_size;
+
+	mraid_notify_t	notify;
+
+	uint8_t		notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
+
+	uint8_t		rebuild_rate;
+	uint8_t		cache_flush_int;
+	uint8_t		sense_alert;
+	uint8_t		drive_insert_count;
+
+	uint8_t		battery_status;
+	uint8_t		num_ldrv;
+	uint8_t		recon_state[MAX_LOGICAL_DRIVES_40LD / 8];
+	uint16_t	ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8];
+
+	uint32_t	ldrv_size[MAX_LOGICAL_DRIVES_40LD];
+	uint8_t		ldrv_prop[MAX_LOGICAL_DRIVES_40LD];
+	uint8_t		ldrv_state[MAX_LOGICAL_DRIVES_40LD];
+	uint8_t		pdrv_state[FC_MAX_PHYSICAL_DEVICES];
+	uint16_t	pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
+
+	uint8_t		targ_xfer[80];
+	uint8_t		pad1k[263];
+} __attribute__ ((packed)) mraid_inquiry3_t;
+
+
+/**
+ * mraid_adapinfo_t - information about the adapter
+ * @max_commands		: max concurrent commands supported
+ * @rebuild_rate		: rebuild rate - 0% thru 100%
+ * @max_targ_per_chan		: max targ per channel
+ * @nchannels			: number of channels on HBA
+ * @fw_version			: firmware version
+ * @age_of_flash		: number of times FW has been flashed
+ * @chip_set_value		: contents of 0xC0000832
+ * @dram_size			: in MB
+ * @cache_flush_interval	: in seconds
+ * @bios_version		:
+ * @board_type			:
+ * @sense_alert			:
+ * @write_config_count		: increase with every configuration change
+ * @drive_inserted_count	: increase with every drive inserted
+ * @inserted_drive		: channel:Id of inserted drive
+ * @battery_status		: bit 0: battery module missing
+ *				bit 1: VBAD
+ *				bit 2: temperature high
+ *				bit 3: battery pack missing
+ *				bit 4,5:
+ *					00 - charge complete
+ *					01 - fast charge in progress
+ *					10 - fast charge fail
+ *					11 - undefined
+ *				bit 6: counter > 1000
+ *				bit 7: Undefined
+ * @dec_fault_bus_info		:
+ */
+typedef struct {
+	uint8_t		max_commands;
+	uint8_t		rebuild_rate;
+	uint8_t		max_targ_per_chan;
+	uint8_t		nchannels;
+	uint8_t		fw_version[4];
+	uint16_t	age_of_flash;
+	uint8_t		chip_set_value;
+	uint8_t		dram_size;
+	uint8_t		cache_flush_interval;
+	uint8_t		bios_version[4];
+	uint8_t		board_type;
+	uint8_t		sense_alert;
+	uint8_t		write_config_count;
+	uint8_t		battery_status;
+	uint8_t		dec_fault_bus_info;
+} __attribute__ ((packed)) mraid_adapinfo_t;
+
+
+/**
+ * mraid_ldrv_info_t - information about the logical drives
+ * @nldrv	: Number of logical drives configured
+ * @rsvd	:
+ * @size	: size of each logical drive
+ * @prop	:
+ * @state	: state of each logical drive
+ */
+typedef struct {
+	uint8_t		nldrv;
+	uint8_t		rsvd[3];
+	uint32_t	size[MAX_LOGICAL_DRIVES_8LD];
+	uint8_t		prop[MAX_LOGICAL_DRIVES_8LD];
+	uint8_t		state[MAX_LOGICAL_DRIVES_8LD];
+} __attribute__ ((packed)) mraid_ldrv_info_t;
+
+
+/**
+ * mraid_pdrv_info_t - information about the physical drives
+ * @pdrv_state	: state of each physical drive
+ */
+typedef struct {
+	uint8_t		pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+	uint8_t		rsvd;
+} __attribute__ ((packed)) mraid_pdrv_info_t;
+
+
+/**
+ * mraid_inquiry_t - RAID inquiry, mailbox command 0x05
+ * @mraid_adapinfo_t	: adapter information
+ * @mraid_ldrv_info_t	: logical drives information
+ * @mraid_pdrv_info_t	: physical drives information
+ */
+typedef struct {
+	mraid_adapinfo_t	adapter_info;
+	mraid_ldrv_info_t	logdrv_info;
+	mraid_pdrv_info_t	pdrv_info;
+} __attribute__ ((packed)) mraid_inquiry_t;
+
+
+/**
+ * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04
+ *
+ * @raid_inq		: raid inquiry
+ * @phys_drv_format	:
+ * @stack_attn		:
+ * @modem_status	:
+ * @rsvd		:
+ */
+typedef struct {
+	mraid_inquiry_t	raid_inq;
+	uint16_t	phys_drv_format[MAX_MBOX_CHANNELS];
+	uint8_t		stack_attn;
+	uint8_t		modem_status;
+	uint8_t		rsvd[2];
+} __attribute__ ((packed)) mraid_extinq_t;
+
+
+/**
+ * adap_device_t - device information
+ * @channel	: channel for the device
+ * @target	: target ID of the device
+ */
+typedef struct {
+	uint8_t		channel;
+	uint8_t		target;
+}__attribute__ ((packed)) adap_device_t;
+
+
+/**
+ * adap_span_40ld_t - 40LD span
+ * @start_blk	: starting block
+ * @num_blks	: number of blocks
+ */
+typedef struct {
+	uint32_t	start_blk;
+	uint32_t	num_blks;
+	adap_device_t	device[MAX_ROW_SIZE_40LD];
+}__attribute__ ((packed)) adap_span_40ld_t;
+
+
+/**
+ * adap_span_8ld_t - 8LD span
+ * @start_blk	: starting block
+ * @num_blks	: number of blocks
+ */
+typedef struct {
+	uint32_t	start_blk;
+	uint32_t	num_blks;
+	adap_device_t	device[MAX_ROW_SIZE_8LD];
+}__attribute__ ((packed)) adap_span_8ld_t;
+
+
+/**
+ * logdrv_param_t - logical drives parameters
+ *
+ * @span_depth	: total number of spans
+ * @level	: RAID level
+ * @read_ahead	: read ahead, no read ahead, adaptive read ahead
+ * @stripe_sz	: encoded stripe size
+ * @status	: status of the logical drive
+ * @write_mode	: write mode, write_through/write_back
+ * @direct_io	: direct io or through cache
+ * @row_size	: number of stripes in a row
+ */
+typedef struct {
+	uint8_t		span_depth;
+	uint8_t		level;
+	uint8_t		read_ahead;
+	uint8_t		stripe_sz;
+	uint8_t		status;
+	uint8_t		write_mode;
+	uint8_t		direct_io;
+	uint8_t		row_size;
+} __attribute__ ((packed)) logdrv_param_t;
+
+
+/**
+ * logdrv_40ld_t - logical drive definition for 40LD controllers
+ * @lparam	: logical drives parameters
+ * @span	: span
+ */
+typedef struct {
+	logdrv_param_t		lparam;
+	adap_span_40ld_t	span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_40ld_t;
+
+
+/**
+ * logdrv_8ld_span8_t - logical drive definition for 8LD controllers
+ * @lparam	: logical drives parameters
+ * @span	: span
+ *
+ * 8-LD logical drive with up to 8 spans
+ */
+typedef struct {
+	logdrv_param_t	lparam;
+	adap_span_8ld_t	span[SPAN_DEPTH_8_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span8_t;
+
+
+/**
+ * logdrv_8ld_span4_t - logical drive definition for 8LD controllers
+ * @lparam	: logical drives parameters
+ * @span	: span
+ *
+ * 8-LD logical drive with up to 4 spans
+ */
+typedef struct {
+	logdrv_param_t	lparam;
+	adap_span_8ld_t	span[SPAN_DEPTH_4_SPANS];
+}__attribute__ ((packed)) logdrv_8ld_span4_t;
+
+
+/**
+ * phys_drive_t - physical device information
+ * @type	: Type of the device
+ * @cur_status	: current status of the device
+ * @tag_depth	: Level of tagging
+ * @sync_neg	: sync negotiation - ENABLE or DISABLE
+ * @size	: configurable size in terms of 512 byte
+ */
+typedef struct {
+	uint8_t		type;
+	uint8_t		cur_status;
+	uint8_t		tag_depth;
+	uint8_t		sync_neg;
+	uint32_t	size;
+}__attribute__ ((packed)) phys_drive_t;
+
+
+/**
+ * disk_array_40ld_t - disk array for 40LD controllers
+ * @numldrv	: number of logical drives
+ * @resvd	:
+ * @ldrv	: logical drives information
+ * @pdrv	: physical drives information
+ */
+typedef struct {
+	uint8_t		numldrv;
+	uint8_t		resvd[3];
+	logdrv_40ld_t	ldrv[MAX_LOGICAL_DRIVES_40LD];
+	phys_drive_t	pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_40ld_t;
+
+
+/**
+ * disk_array_8ld_span8_t - disk array for 8LD controllers
+ * @numldrv	: number of logical drives
+ * @resvd	:
+ * @ldrv	: logical drives information
+ * @pdrv	: physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 8 spans
+ */
+typedef struct {
+	uint8_t			numldrv;
+	uint8_t			resvd[3];
+	logdrv_8ld_span8_t	ldrv[MAX_LOGICAL_DRIVES_8LD];
+	phys_drive_t		pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span8_t;
+
+
+/**
+ * disk_array_8ld_span4_t - disk array for 8LD controllers
+ * @numldrv	: number of logical drives
+ * @resvd	:
+ * @ldrv	: logical drives information
+ * @pdrv	: physical drives information
+ *
+ * Disk array for 8LD logical drives with up to 4 spans
+ */
+typedef struct {
+	uint8_t			numldrv;
+	uint8_t			resvd[3];
+	logdrv_8ld_span4_t	ldrv[MAX_LOGICAL_DRIVES_8LD];
+	phys_drive_t		pdrv[MBOX_MAX_PHYSICAL_DRIVES];
+}__attribute__ ((packed)) disk_array_8ld_span4_t;
+
+
+/**
+ * struct private_bios_data - bios private data for boot devices
+ * @geometry	: bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
+ *		0x1000 - 8GB, Others values are invalid
+ * @unused	: bits 4-7 are unused
+ * @boot_drv	: logical drive set as boot drive, 0..7 - for 8LD cards,
+ * 		0..39 - for 40LD cards
+ * @cksum	: 0-(sum of first 13 bytes of this structure)
+ */
+struct private_bios_data {
+	uint8_t		geometry	:4;
+	uint8_t		unused		:4;
+	uint8_t		boot_drv;
+	uint8_t		rsvd[12];
+	uint16_t	cksum;
+} __attribute__ ((packed));
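+
+/*
+ * Verification sketch for the structure above. @cksum is described as
+ * "0-(sum of first 13 bytes)"; this sketch conservatively sums every byte
+ * preceding the cksum field, so a valid structure sums to zero. The
+ * function name is an assumption.
+ */
+#if 0	/* example only */
+static int example_bios_data_valid(const struct private_bios_data *pbd)
+{
+	const uint8_t	*p = (const uint8_t *)pbd;
+	uint16_t	sum = 0;
+	size_t		i;
+
+	for (i = 0; i < offsetof(struct private_bios_data, cksum); i++)
+		sum += p[i];
+
+	return (uint16_t)(sum + pbd->cksum) == 0;
+}
+#endif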
+
+
+/**
+ * mbox_sgl64 - 64-bit scatter list for mailbox based controllers
+ * @address	: address of the buffer
+ * @length	: data transfer length
+ */
+typedef struct {
+	uint64_t	address;
+	uint32_t	length;
+} __attribute__ ((packed)) mbox_sgl64;
+
+/**
+ * mbox_sgl32 - 32-bit scatter list for mailbox based controllers
+ * @address	: address of the buffer
+ * @length	: data transfer length
+ */
+typedef struct {
+	uint32_t	address;
+	uint32_t	length;
+} __attribute__ ((packed)) mbox_sgl32;
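+
+/*
+ * Sketch of filling the 64-bit scatter list above from a mapped SCSI
+ * command (assumes scsi/scsi_cmnd.h and that scsi_dma_map() returned
+ * 'sgcnt'; variable names are assumptions):
+ *
+ *	struct scatterlist *sg;
+ *	int i;
+ *
+ *	scsi_for_each_sg(scp, sg, sgcnt, i) {
+ *		sgl64[i].address = sg_dma_address(sg);
+ *		sgl64[i].length  = sg_dma_len(sg);
+ *	}
+ */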
+
+#endif		// _MRAID_MBOX_DEFS_H_
+
+/* vim: set ts=8 sw=8 tw=78: */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/mega_common.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/mega_common.h
new file mode 100644
index 0000000..1d037ed
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/mega_common.h
@@ -0,0 +1,290 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: mega_common.h
+ *
+ * Library of common routines used by all low-level megaraid drivers
+ */
+
+#ifndef _MEGA_COMMON_H_
+#define _MEGA_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+
+#define LSI_MAX_CHANNELS		16
+#define LSI_MAX_LOGICAL_DRIVES_64LD	(64+1)
+
+#define HBA_SIGNATURE_64_BIT		0x299
+#define PCI_CONF_AMISIG64		0xa4
+
+#define MEGA_SCSI_INQ_EVPD		1
+#define MEGA_INVALID_FIELD_IN_CDB	0x24
+
+
+/**
+ * scb_t - scsi command control block
+ * @ccb			: command control block for individual driver
+ * @list		: list of control blocks
+ * @gp			: general purpose field for LLDs
+ * @sno			: all SCBs have a serial number
+ * @scp			: associated scsi command
+ * @state		: current state of scb
+ * @dma_direction	: direction of data transfer
+ * @dma_type		: transfer with sg list, buffer, or no data transfer
+ * @dev_channel		: actual channel on the device
+ * @dev_target		: actual target on the device
+ * @status		: completion status
+ *
+ * This is our central data structure to issue commands to each driver.
+ * Driver specific data structures are maintained in the ccb field.
+ * scb provides a field 'gp', which can be used by LLD for its own purposes
+ *
+ * dev_channel and dev_target must be initialized with the actual channel and
+ * target on the controller.
+ */
+typedef struct {
+	caddr_t			ccb;
+	struct list_head	list;
+	unsigned long		gp;
+	unsigned int		sno;
+	struct scsi_cmnd	*scp;
+	uint32_t		state;
+	uint32_t		dma_direction;
+	uint32_t		dma_type;
+	uint16_t		dev_channel;
+	uint16_t		dev_target;
+	uint32_t		status;
+} scb_t;
+
+/*
+ * SCB states as it transitions from one state to another
+ */
+#define SCB_FREE	0x0000	/* on the free list */
+#define SCB_ACTIVE	0x0001	/* off the free list */
+#define SCB_PENDQ	0x0002	/* on the pending queue */
+#define SCB_ISSUED	0x0004	/* issued - owner f/w */
+#define SCB_ABORT	0x0008	/* Got an abort for this one */
+#define SCB_RESET	0x0010	/* Got a reset for this one */
+
+/*
+ * DMA types for scb
+ */
+#define MRAID_DMA_NONE	0x0000	/* no data transfer for this command */
+#define MRAID_DMA_WSG	0x0001	/* data transfer using a sg list */
+#define MRAID_DMA_WBUF	0x0002	/* data transfer using a contiguous buffer */
+
+
+/**
+ * struct adapter_t - driver's initialization structure
+ * @dpc_h			: tasklet handle
+ * @pdev			: pci configuration pointer for kernel
+ * @host			: pointer to host structure of mid-layer
+ * @lock			: synchronization lock for mid-layer and driver
+ * @quiescent			: driver is quiescent for now.
+ * @outstanding_cmds		: number of commands pending in the driver
+ * @kscb_list			: array of SCBs used for IO
+ * @kscb_pool			: pool of free scbs for IO
+ * @kscb_pool_lock		: lock for pool of free scbs
+ * @pend_list			: pending commands list
+ * @pend_list_lock		: exclusion lock for pending commands list
+ * @completed_list		: list of completed commands
+ * @completed_list_lock		: exclusion lock for list of completed commands
+ * @sglen			: max sg elements supported
+ * @device_ids			: to convert kernel device addr to our devices.
+ * @raid_device			: raid adapter specific pointer
+ * @max_channel			: maximum channel number supported - inclusive
+ * @max_target			: max target supported - inclusive
+ * @max_lun			: max lun supported - inclusive
+ * @unique_id			: unique identifier for each adapter
+ * @irq				: IRQ for this adapter
+ * @ito				: internal timeout value, (-1) means no timeout
+ * @ibuf			: buffer to issue internal commands
+ * @ibuf_dma_h			: dma handle for the above buffer
+ * @uscb_list			: SCB pointers for user cmds, common mgmt module
+ * @uscb_pool			: pool of SCBs for user commands
+ * @uscb_pool_lock		: exclusion lock for these SCBs
+ * @max_cmds			: max outstanding commands
+ * @fw_version			: firmware version
+ * @bios_version		: bios version
+ * @max_cdb_sz			: biggest CDB size supported.
+ * @ha				: is high availability present - clustering
+ * @init_id			: initiator ID, the default value should be 7
+ * @max_sectors			: max sectors per request
+ * @cmd_per_lun			: max outstanding commands per LUN
+ * @being_detached		: set when unloading, no more mgmt calls
+ *
+ *
+ * mraid_setup_device_map() can be called anytime after the device map is
+ * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is
+ * required, usually from LLD's queue entry point. The former API sets up the
+ * device map; MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) can then be
+ * used to find out if the device in question is a logical drive.
+ *
+ * quiescent flag should be set by the driver if it is not accepting more
+ * commands
+ *
+ * NOTE: The fields of this structure are placed to minimize cache misses
+ */
+
+// amount of space required to store the bios and firmware version strings
+#define VERSION_SIZE	16
+
+typedef struct {
+	struct tasklet_struct	dpc_h;
+	struct pci_dev		*pdev;
+	struct Scsi_Host	*host;
+	spinlock_t		lock;
+	uint8_t			quiescent;
+	int			outstanding_cmds;
+	scb_t			*kscb_list;
+	struct list_head	kscb_pool;
+	spinlock_t		kscb_pool_lock;
+	struct list_head	pend_list;
+	spinlock_t		pend_list_lock;
+	struct list_head	completed_list;
+	spinlock_t		completed_list_lock;
+	uint16_t		sglen;
+	int			device_ids[LSI_MAX_CHANNELS]
+					[LSI_MAX_LOGICAL_DRIVES_64LD];
+	caddr_t			raid_device;
+	uint8_t			max_channel;
+	uint16_t		max_target;
+	uint8_t			max_lun;
+
+	uint32_t		unique_id;
+	int			irq;
+	uint8_t			ito;
+	caddr_t			ibuf;
+	dma_addr_t		ibuf_dma_h;
+	scb_t			*uscb_list;
+	struct list_head	uscb_pool;
+	spinlock_t		uscb_pool_lock;
+	int			max_cmds;
+	uint8_t			fw_version[VERSION_SIZE];
+	uint8_t			bios_version[VERSION_SIZE];
+	uint8_t			max_cdb_sz;
+	uint8_t			ha;
+	uint16_t		init_id;
+	uint16_t		max_sectors;
+	uint16_t		cmd_per_lun;
+	atomic_t		being_detached;
+} adapter_t;
+
+#define SCSI_FREE_LIST_LOCK(adapter)	(&adapter->kscb_pool_lock)
+#define USER_FREE_LIST_LOCK(adapter)	(&adapter->uscb_pool_lock)
+#define PENDING_LIST_LOCK(adapter)	(&adapter->pend_list_lock)
+#define COMPLETED_LIST_LOCK(adapter)	(&adapter->completed_list_lock)
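+
+/*
+ * Usage sketch for the locks above (the function name is an assumption):
+ * SCBs move between the free pool and the pending list under these locks,
+ * typically with interrupts disabled.
+ */
+#if 0	/* example only */
+static scb_t *example_get_free_scb(adapter_t *adapter)
+{
+	unsigned long	flags;
+	scb_t		*scb = NULL;
+
+	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+	if (!list_empty(&adapter->kscb_pool)) {
+		scb = list_entry(adapter->kscb_pool.next, scb_t, list);
+		list_del_init(&scb->list);
+	}
+	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+	return scb;
+}
+#endif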
+
+
+// conversion from scsi command
+#define SCP2HOST(scp)			(scp)->device->host	// to host
+#define SCP2HOSTDATA(scp)		SCP2HOST(scp)->hostdata	// to soft state
+#define SCP2CHANNEL(scp)		(scp)->device->channel	// to channel
+#define SCP2TARGET(scp)			(scp)->device->id	// to target
+#define SCP2LUN(scp)			(u32)(scp)->device->lun	// to LUN
+
+// generic macro to convert scsi command and host to controller's soft state
+#define SCSIHOST2ADAP(host)	(((caddr_t *)(host->hostdata))[0])
+#define SCP2ADAPTER(scp)	(adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
+
+
+#define MRAID_IS_LOGICAL(adp, scp)	\
+	(SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0
+
+#define MRAID_IS_LOGICAL_SDEV(adp, sdev)	\
+	(sdev->channel == (adp)->max_channel) ? 1 : 0
+
+/**
+ * MRAID_GET_DEVICE_MAP - device ids
+ * @adp			: adapter's soft state
+ * @scp			: mid-layer scsi command pointer
+ * @p_chan		: physical channel on the controller
+ * @target		: target id of the device or logical drive number
+ * @islogical		: set if the command is for the logical drive
+ *
+ * Macro to retrieve information about device class, logical or physical and
+ * the corresponding physical channel and target or logical drive number
+ */
+#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical)	\
+	/*								\
+	 * Is the request coming for the virtual channel		\
+	 */								\
+	islogical = MRAID_IS_LOGICAL(adp, scp);				\
+									\
+	/*								\
+	 * Get an index into our table of drive ids mapping		\
+	 */								\
+	if (islogical) {						\
+		p_chan = 0xFF;						\
+		target =						\
+		(adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)];	\
+	}								\
+	else {								\
+		p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)]		\
+					[SCP2TARGET(scp)] >> 8) & 0xFF;	\
+		target = ((adp)->device_ids[SCP2CHANNEL(scp)]		\
+					[SCP2TARGET(scp)] & 0xFF);	\
+	}
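+
+/*
+ * Usage sketch for the macro above, as it might appear in an LLD's queue
+ * entry point (variable names are assumptions):
+ *
+ *	adapter_t *adapter = SCP2ADAPTER(scp);
+ *	int channel, target, islogical;
+ *
+ *	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
+ *	if (islogical)
+ *		;	// 'target' holds the logical drive number
+ *	else
+ *		;	// 'channel'/'target' address the physical device
+ */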
+
+/*
+ * ### Helper routines ###
+ */
+#define LSI_DBGLVL mraid_debug_level	// each LLD must define a global
+ 					// mraid_debug_level
+
+#ifdef DEBUG
+#if defined (_ASSERT_PANIC)
+#define ASSERT_ACTION	panic
+#else
+#define ASSERT_ACTION	printk
+#endif
+
+#define ASSERT(expression)						\
+	if (!(expression)) {						\
+	ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n",	\
+			#expression, __FILE__, __LINE__, __func__);	\
+	}
+#else
+#define ASSERT(expression)
+#endif
+
+/**
+ * struct mraid_pci_blk - structure holds DMA memory block info
+ * @vaddr		: virtual address to a memory block
+ * @dma_addr		: DMA handle to a memory block
+ *
+ * This structure is filled up for the caller. It is the responsibility of the
+ * caller to allocate this array big enough to store addresses for all
+ * requested elements
+ */
+struct mraid_pci_blk {
+	caddr_t		vaddr;
+	dma_addr_t	dma_addr;
+};
+
+#endif // _MEGA_COMMON_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_ioctl.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_ioctl.h
new file mode 100644
index 0000000..05f6e4e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -0,0 +1,300 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: megaraid_ioctl.h
+ *
+ * Definitions to interface with user level applications
+ */
+
+#ifndef _MEGARAID_IOCTL_H_
+#define _MEGARAID_IOCTL_H_
+
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "mbox_defs.h"
+
+/*
+ * console messages debug levels
+ */
+#define	CL_ANN		0	/* print unconditionally, announcements */
+#define CL_DLEVEL1	1	/* debug level 1, informative */
+#define CL_DLEVEL2	2	/* debug level 2, verbose */
+#define CL_DLEVEL3	3	/* debug level 3, very verbose */
+
+/**
+ * con_log() - console log routine
+ * @level		: indicates the severity of the message.
+ * @fmt			: format string
+ *
+ * con_log displays the error messages on the console based on the current
+ * debug level. Also it attaches the appropriate kernel severity level with
+ * the message.
+ */
+#define	con_log(level, fmt)	do { if (LSI_DBGLVL >= level) printk fmt; } while (0)
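+
+/*
+ * Example invocation; note the extra parentheses around the printk
+ * arguments, which the macro relies on:
+ *
+ *	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: %d commands pending\n",
+ *		adapter->outstanding_cmds));
+ */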
+
+/*
+ * Definitions & Declarations needed to use common management module
+ */
+
+#define MEGAIOC_MAGIC		'm'
+#define MEGAIOCCMD		_IOWR(MEGAIOC_MAGIC, 0, mimd_t)
+
+#define MEGAIOC_QNADAP		'm'	/* Query # of adapters		*/
+#define MEGAIOC_QDRVRVER	'e'	/* Query driver version		*/
+#define MEGAIOC_QADAPINFO   	'g'	/* Query adapter information	*/
+
+#define USCSICMD		0x80
+#define UIOC_RD			0x00001
+#define UIOC_WR			0x00002
+
+#define MBOX_CMD		0x00000
+#define GET_DRIVER_VER		0x10000
+#define GET_N_ADAP		0x20000
+#define GET_ADAP_INFO		0x30000
+#define GET_CAP			0x40000
+#define GET_STATS		0x50000
+#define GET_IOCTL_VERSION	0x01
+
+#define EXT_IOCTL_SIGN_SZ	16
+#define EXT_IOCTL_SIGN		"$$_EXTD_IOCTL_$$"
+
+#define	MBOX_LEGACY		0x00		/* ioctl has legacy mbox*/
+#define MBOX_HPE		0x01		/* ioctl has hpe mbox	*/
+
+#define	APPTYPE_MIMD		0x00		/* old existing apps	*/
+#define APPTYPE_UIOC		0x01		/* new apps using uioc	*/
+
+#define IOCTL_ISSUE		0x00000001	/* Issue ioctl		*/
+#define IOCTL_ABORT		0x00000002	/* Abort previous ioctl	*/
+
+#define DRVRTYPE_MBOX		0x00000001	/* regular mbox driver	*/
+#define DRVRTYPE_HPE		0x00000002	/* new hpe driver	*/
+
+#define MKADAP(adapno)	((MEGAIOC_MAGIC << 8) | (adapno))
+#define GETADAP(mkadap)	((mkadap) ^ (MEGAIOC_MAGIC << 8))
+
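+/*
+ * Worked example for the pair above: MEGAIOC_MAGIC is 'm' (0x6D), so for
+ * adapter number 2, MKADAP(2) yields 0x6D02 and GETADAP(0x6D02) recovers 2.
+ */
+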
+#define MAX_DMA_POOLS		5		/* 4k, 8k, 16k, 32k, 64k*/
+
+
+/**
+ * struct uioc_t - the common ioctl packet structure
+ *
+ * @signature	: Must be "$$_EXTD_IOCTL_$$"
+ * @mb_type	: Type of the mailbox (MBOX_LEGACY or MBOX_HPE)
+ * @app_type	: Type of the issuing application (existing or new)
+ * @opcode	: Opcode of the command
+ * @adapno	: Adapter number
+ * @cmdbuf	: Pointer to buffer - can point to mbox or plain data buffer
+ * @xferlen	: xferlen for DCMD and non mailbox commands
+ * @data_dir	: Direction of the data transfer
+ * @status	: Status from the driver
+ * @reserved	: reserved bytes for future expansion
+ *
+ * @user_data	: user data transfer address is saved in this
+ * @user_data_len: length of the data buffer sent by user app
+ * @user_pthru	: user passthru address is saved in this (null if DCMD)
+ * @pthru32	: kernel address passthru (allocated per kioc)
+ * @pthru32_h	: physical address of @pthru32
+ * @list	: for kioc free pool list maintenance
+ * @done	: call back routine for llds to call when kioc is completed
+ * @buf_vaddr	: dma pool buffer attached to kioc for data transfer
+ * @buf_paddr	: physical address of the dma pool buffer
+ * @pool_index	: index of the dma pool that @buf_vaddr is taken from
+ * @free_buf	: indicates if buffer needs to be freed after kioc completes
+ *
+ * Note		: All LSI drivers understand only this packet. Any other
+ *		: format sent by applications would be converted to this.
+ */
+typedef struct uioc {
+
+/* User Apps: */
+
+	uint8_t			signature[EXT_IOCTL_SIGN_SZ];
+	uint16_t		mb_type;
+	uint16_t		app_type;
+	uint32_t		opcode;
+	uint32_t		adapno;
+	uint64_t		cmdbuf;
+	uint32_t		xferlen;
+	uint32_t		data_dir;
+	int32_t			status;
+	uint8_t			reserved[128];
+
+/* Driver Data: */
+	void __user *		user_data;
+	uint32_t		user_data_len;
+
+	/* 64bit alignment */
+	uint32_t                pad_for_64bit_align;
+
+	mraid_passthru_t	__user *user_pthru;
+
+	mraid_passthru_t	*pthru32;
+	dma_addr_t		pthru32_h;
+
+	struct list_head	list;
+	void			(*done)(struct uioc*);
+
+	caddr_t			buf_vaddr;
+	dma_addr_t		buf_paddr;
+	int8_t			pool_index;
+	uint8_t			free_buf;
+
+	uint8_t			timedout;
+
+} __attribute__ ((aligned(1024),packed)) uioc_t;
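+
+/*
+ * Sketch of how a user-space application might prepare this packet before
+ * issuing MEGAIOCCMD (the function name and field values are illustrative
+ * assumptions):
+ */
+#if 0	/* example only */
+static void example_prepare_kioc(uioc_t *kioc)
+{
+	memset(kioc, 0, sizeof(*kioc));
+	memcpy(kioc->signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ);
+	kioc->mb_type	= MBOX_LEGACY;
+	kioc->app_type	= APPTYPE_UIOC;
+	kioc->opcode	= MBOX_CMD;
+	kioc->adapno	= 0;		/* first adapter */
+	kioc->data_dir	= UIOC_RD;
+	/* kioc->cmdbuf -> legacy mailbox; kioc->xferlen = data length */
+}
+#endif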
+
+
+/**
+ * struct mraid_hba_info - information about the controller
+ *
+ * @pci_vendor_id		: PCI vendor id
+ * @pci_device_id		: PCI device id
+ * @subsys_vendor_id		: PCI subsystem vendor id
+ * @subsys_device_id		: PCI subsystem device id
+ * @baseport			: base port of hba memory
+ * @pci_bus			: PCI bus
+ * @pci_dev_fn			: PCI device/function values
+ * @irq				: interrupt vector for the device
+ *
+ * Extended information of 256 bytes about the controller. Aligned on a
+ * single-byte boundary so that 32-bit applications can be run with 64-bit
+ * platform drivers without re-compilation.
+ * NOTE: reduce the number of reserved bytes whenever new fields are added, so
+ * that the total size of the structure remains 256 bytes.
+ */
+typedef struct mraid_hba_info {
+
+	uint16_t	pci_vendor_id;
+	uint16_t	pci_device_id;
+	uint16_t	subsys_vendor_id;
+	uint16_t	subsys_device_id;
+
+	uint64_t	baseport;
+	uint8_t		pci_bus;
+	uint8_t		pci_dev_fn;
+	uint8_t		pci_slot;
+	uint8_t		irq;
+
+	uint32_t	unique_id;
+	uint32_t	host_no;
+
+	uint8_t		num_ldrv;
+} __attribute__ ((aligned(256), packed)) mraid_hba_info_t;
+
+
+/**
+ * mcontroller	: adapter info structure for old mimd_t apps
+ *
+ * @base	: base address
+ * @irq		: irq number
+ * @numldrv	: number of logical drives
+ * @pcibus	: pci bus
+ * @pcidev	: pci device
+ * @pcifun	: pci function
+ * @pciid	: pci id
+ * @pcivendor	: vendor id
+ * @pcislot	: slot number
+ * @uid		: unique id
+ */
+typedef struct mcontroller {
+
+	uint64_t	base;
+	uint8_t		irq;
+	uint8_t		numldrv;
+	uint8_t		pcibus;
+	uint16_t	pcidev;
+	uint8_t		pcifun;
+	uint16_t	pciid;
+	uint16_t	pcivendor;
+	uint8_t		pcislot;
+	uint32_t	uid;
+
+} __attribute__ ((packed)) mcontroller_t;
+
+
+/**
+ * mm_dmapool_t	: Represents one dma pool with just one buffer
+ *
+ * @vaddr	: Virtual address
+ * @paddr	: DMA physical address
+ * @buf_size	: In KB - 4 = 4k, 8 = 8k etc.
+ * @handle	: Handle to the dma pool
+ * @lock	: lock to synchronize access to the pool
+ * @in_use	: If pool already in use, attach new block
+ */
+typedef struct mm_dmapool {
+	caddr_t		vaddr;
+	dma_addr_t	paddr;
+	uint32_t	buf_size;
+	struct dma_pool	*handle;
+	spinlock_t	lock;
+	uint8_t		in_use;
+} mm_dmapool_t;
+
+
+/**
+ * mraid_mmadp_t: Structure that drivers pass during (un)registration
+ *
+ * @unique_id		: Any unique id (usually PCI bus+dev+fn)
+ * @drvr_type		: megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE)
+ * @drv_data		: Driver specific; not touched by the common module
+ * @timeout		: timeout for issued kiocs
+ * @max_kioc		: Maximum ioctl packets acceptable by the lld
+ * @pdev		: pci dev; used for allocating dma'ble memory
+ * @issue_uioc		: Driver supplied routine to issue uioc_t commands
+ *			: issue_uioc(drvr_data, kioc, ISSUE/ABORT)
+ * @quiescent		: flag to indicate if ioctl can be issued to this adp
+ * @list		: attach with the global list of adapters
+ * @kioc_list		: block of mem for @max_kioc number of kiocs
+ * @kioc_pool		: pool of free kiocs
+ * @kioc_pool_lock	: protection for free pool
+ * @kioc_semaphore	: so as not to exceed @max_kioc parallel ioctls
+ * @mbox_list		: block of mem for @max_kioc number of mboxes
+ * @pthru_dma_pool	: DMA pool to allocate passthru packets
+ * @dma_pool_list	: array of dma pools
+ */
+
+typedef struct mraid_mmadp {
+
+/* Filled by driver */
+
+	uint32_t		unique_id;
+	uint32_t		drvr_type;
+	unsigned long		drvr_data;
+	uint16_t		timeout;
+	uint8_t			max_kioc;
+
+	struct pci_dev		*pdev;
+
+	int(*issue_uioc)(unsigned long, uioc_t *, uint32_t);
+
+/* Maintained by common module */
+	uint32_t		quiescent;
+
+	struct list_head	list;
+	uioc_t			*kioc_list;
+	struct list_head	kioc_pool;
+	spinlock_t		kioc_pool_lock;
+	struct semaphore	kioc_semaphore;
+
+	mbox64_t		*mbox_list;
+	struct dma_pool		*pthru_dma_pool;
+	mm_dmapool_t		dma_pool_list[MAX_DMA_POOLS];
+
+} mraid_mmadp_t;
+
+int mraid_mm_register_adp(mraid_mmadp_t *);
+int mraid_mm_unregister_adp(uint32_t);
+uint32_t mraid_mm_adapter_app_handle(uint32_t);
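+
+/*
+ * Registration sketch for an LLD (a minimal sketch; the 'example_*'
+ * names, handle and limits below are assumptions):
+ */
+#if 0	/* example only */
+static int example_register_with_cmm(void)
+{
+	mraid_mmadp_t	adp;
+
+	memset(&adp, 0, sizeof(adp));
+	adp.unique_id	= 0x0100;		/* e.g. PCI bus/devfn */
+	adp.drvr_type	= DRVRTYPE_MBOX;
+	adp.drvr_data	= example_drvr_data;	/* LLD private handle */
+	adp.pdev	= example_pdev;		/* for DMA allocations */
+	adp.issue_uioc	= example_issue_uioc;	/* uioc entry point */
+	adp.timeout	= 300;			/* seconds, assumed unit */
+	adp.max_kioc	= 32;			/* parallel ioctls */
+
+	return mraid_mm_register_adp(&adp);
+}
+#endif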
+
+#endif /* _MEGARAID_IOCTL_H_ */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.c b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.c
new file mode 100644
index 0000000..ec3c438
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.c
@@ -0,0 +1,4143 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: megaraid_mbox.c
+ * Version	: v2.20.5.1 (Nov 16 2006)
+ *
+ * Authors:
+ * 	Atul Mukker		<Atul.Mukker@lsi.com>
+ * 	Sreenivas Bagalkote	<Sreenivas.Bagalkote@lsi.com>
+ * 	Manoj Jose		<Manoj.Jose@lsi.com>
+ * 	Seokmann Ju
+ *
+ * List of supported controllers
+ *
+ * OEM	Product Name			VID	DID	SSVID	SSID
+ * ---	------------			---	---	----	----
+ * Dell PERC3/QC			101E	1960	1028	0471
+ * Dell PERC3/DC			101E	1960	1028	0493
+ * Dell PERC3/SC			101E	1960	1028	0475
+ * Dell PERC3/Di			1028	1960	1028	0123
+ * Dell PERC4/SC			1000	1960	1028	0520
+ * Dell PERC4/DC			1000	1960	1028	0518
+ * Dell PERC4/QC			1000	0407	1028	0531
+ * Dell PERC4/Di			1028	000F	1028	014A
+ * Dell PERC 4e/Si			1028	0013	1028	016c
+ * Dell PERC 4e/Di			1028	0013	1028	016d
+ * Dell PERC 4e/Di			1028	0013	1028	016e
+ * Dell PERC 4e/Di			1028	0013	1028	016f
+ * Dell PERC 4e/Di			1028	0013	1028	0170
+ * Dell PERC 4e/DC			1000	0408	1028	0002
+ * Dell PERC 4e/SC			1000	0408	1028	0001
+ *
+ *
+ * LSI MegaRAID SCSI 320-0		1000	1960	1000	A520
+ * LSI MegaRAID SCSI 320-1		1000	1960	1000	0520
+ * LSI MegaRAID SCSI 320-2		1000	1960	1000	0518
+ * LSI MegaRAID SCSI 320-0X		1000	0407	1000	0530
+ * LSI MegaRAID SCSI 320-2X		1000	0407	1000	0532
+ * LSI MegaRAID SCSI 320-4X		1000	0407	1000	0531
+ * LSI MegaRAID SCSI 320-1E		1000	0408	1000	0001
+ * LSI MegaRAID SCSI 320-2E		1000	0408	1000	0002
+ * LSI MegaRAID SATA 150-4		1000	1960	1000	4523
+ * LSI MegaRAID SATA 150-6		1000	1960	1000	0523
+ * LSI MegaRAID SATA 300-4X		1000	0409	1000	3004
+ * LSI MegaRAID SATA 300-8X		1000	0409	1000	3008
+ *
+ * INTEL RAID Controller SRCU42X	1000	0407	8086	0532
+ * INTEL RAID Controller SRCS16		1000	1960	8086	0523
+ * INTEL RAID Controller SRCU42E	1000	0408	8086	0002
+ * INTEL RAID Controller SRCZCRX	1000	0407	8086	0530
+ * INTEL RAID Controller SRCS28X	1000	0409	8086	3008
+ * INTEL RAID Controller SROMBU42E	1000	0408	8086	3431
+ * INTEL RAID Controller SROMBU42E	1000	0408	8086	3499
+ * INTEL RAID Controller SRCU51L	1000	1960	8086	0520
+ *
+ * FSC	MegaRAID PCI Express ROMB	1000	0408	1734	1065
+ *
+ * ACER	MegaRAID ROMB-2E		1000	0408	1025	004D
+ *
+ * NEC	MegaRAID PCI Express ROMB	1000	0408	1033	8287
+ *
+ * For history of changes, see Documentation/scsi/ChangeLog.megaraid
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "megaraid_mbox.h"
+
+static int megaraid_init(void);
+static void megaraid_exit(void);
+
+static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
+static void megaraid_detach_one(struct pci_dev *);
+static void megaraid_mbox_shutdown(struct pci_dev *);
+
+static int megaraid_io_attach(adapter_t *);
+static void megaraid_io_detach(adapter_t *);
+
+static int megaraid_init_mbox(adapter_t *);
+static void megaraid_fini_mbox(adapter_t *);
+
+static int megaraid_alloc_cmd_packets(adapter_t *);
+static void megaraid_free_cmd_packets(adapter_t *);
+
+static int megaraid_mbox_setup_dma_pools(adapter_t *);
+static void megaraid_mbox_teardown_dma_pools(adapter_t *);
+
+static int megaraid_sysfs_alloc_resources(adapter_t *);
+static void megaraid_sysfs_free_resources(adapter_t *);
+
+static int megaraid_abort_handler(struct scsi_cmnd *);
+static int megaraid_reset_handler(struct scsi_cmnd *);
+
+static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
+static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
+static int megaraid_busywait_mbox(mraid_device_t *);
+static int megaraid_mbox_product_info(adapter_t *);
+static int megaraid_mbox_extended_cdb(adapter_t *);
+static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
+static int megaraid_mbox_support_random_del(adapter_t *);
+static int megaraid_mbox_get_max_sg(adapter_t *);
+static void megaraid_mbox_enum_raid_scsi(adapter_t *);
+static void megaraid_mbox_flush_cache(adapter_t *);
+static int megaraid_mbox_fire_sync_cmd(adapter_t *);
+
+static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
+static void megaraid_mbox_setup_device_map(adapter_t *);
+
+static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
+static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
+static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
+static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
+		struct scsi_cmnd *);
+static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
+		struct scsi_cmnd *);
+
+static irqreturn_t megaraid_isr(int, void *);
+
+static void megaraid_mbox_dpc(unsigned long);
+
+static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
+static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
+
+static int megaraid_cmm_register(adapter_t *);
+static int megaraid_cmm_unregister(adapter_t *);
+static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
+static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
+static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
+static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
+static int wait_till_fw_empty(adapter_t *);
+
+
+
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGARAID_VERSION);
+
+/*
+ * ### modules parameters for driver ###
+ */
+
+/*
+ * Set to enable driver to expose unconfigured disk to kernel
+ */
+static int megaraid_expose_unconf_disks = 0;
+module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
+MODULE_PARM_DESC(unconf_disks,
+	"Set to expose unconfigured disks to kernel (default=0)");
+
+/*
+ * driver wait time if the adapter's mailbox is busy
+ */
+static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
+module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
+MODULE_PARM_DESC(busy_wait,
+	"Max wait for mailbox in microseconds if busy (default=10)");
+
+/*
+ * number of sectors per IO command
+ */
+static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
+module_param_named(max_sectors, megaraid_max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+	"Maximum number of sectors per IO command (default=128)");
+
+/*
+ * number of commands per logical unit
+ */
+static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
+module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+	"Maximum number of commands per logical unit (default=64)");
+
+
+/*
+ * Fast driver load option, skip scanning for physical devices during load.
+ * This would result in non-disk devices being skipped during driver load
+ * time. These can be added later, though, using /proc/scsi/scsi
+ */
+static unsigned int megaraid_fast_load = 0;
+module_param_named(fast_load, megaraid_fast_load, int, 0);
+MODULE_PARM_DESC(fast_load,
+	"Faster loading of the driver, skips physical devices! (default=0)");
+
+
+/*
+ * mraid_debug level - threshold for amount of information to be displayed by
+ * the driver. This level can be changed through modules parameters, ioctl or
+ * sysfs/proc interface. By default, print the announcement messages only.
+ */
+int mraid_debug_level = CL_ANN;
+module_param_named(debug_level, mraid_debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
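+
+/*
+ * Example module load with the parameters above (illustrative values):
+ *
+ *	# modprobe megaraid fast_load=1 cmd_per_lun=32 debug_level=1
+ */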
+
+/*
+ * ### global data ###
+ */
+static uint8_t megaraid_mbox_version[8] =
+	{ 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
+
+
+/*
+ * PCI table for all supported controllers.
+ */
+static struct pci_device_id pci_id_table_g[] =  {
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_PERC4_SC,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4_SC,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_PERC4_DC,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4_DC,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_VERDE,
+		PCI_ANY_ID,
+		PCI_ANY_ID,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4E_DI_KOBUK,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
+	},
+	{
+		PCI_VENDOR_ID_DELL,
+		PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
+		PCI_VENDOR_ID_DELL,
+		PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_DOBSON,
+		PCI_ANY_ID,
+		PCI_ANY_ID,
+	},
+	{
+		PCI_VENDOR_ID_AMI,
+		PCI_DEVICE_ID_AMI_MEGARAID3,
+		PCI_ANY_ID,
+		PCI_ANY_ID,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_AMI_MEGARAID3,
+		PCI_ANY_ID,
+		PCI_ANY_ID,
+	},
+	{
+		PCI_VENDOR_ID_LSI_LOGIC,
+		PCI_DEVICE_ID_LINDSAY,
+		PCI_ANY_ID,
+		PCI_ANY_ID,
+	},
+	{0}	/* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, pci_id_table_g);
+
+
+static struct pci_driver megaraid_pci_driver = {
+	.name		= "megaraid",
+	.id_table	= pci_id_table_g,
+	.probe		= megaraid_probe_one,
+	.remove		= megaraid_detach_one,
+	.shutdown	= megaraid_mbox_shutdown,
+};
+
+
+
+// definitions for the device attributes for exporting logical drive number
+// for a scsi address (Host, Channel, Id, Lun)
+
+DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+		NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_shost_attrs[] = {
+	&dev_attr_megaraid_mbox_app_hndl,
+	NULL,
+};
+
+
+DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+
+// Host template initializer for megaraid mbox sysfs device attributes
+static struct device_attribute *megaraid_sdev_attrs[] = {
+	&dev_attr_megaraid_mbox_ld,
+	NULL,
+};
+
+/*
+ * Scsi host template for megaraid unified driver
+ */
+static struct scsi_host_template megaraid_template_g = {
+	.module				= THIS_MODULE,
+	.name				= "LSI Logic MegaRAID driver",
+	.proc_name			= "megaraid",
+	.queuecommand			= megaraid_queue_command,
+	.eh_abort_handler		= megaraid_abort_handler,
+	.eh_host_reset_handler		= megaraid_reset_handler,
+	.change_queue_depth		= scsi_change_queue_depth,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.no_write_same			= 1,
+	.sdev_attrs			= megaraid_sdev_attrs,
+	.shost_attrs			= megaraid_shost_attrs,
+};
+
+
+/**
+ * megaraid_init - module load hook
+ *
+ * We register ourselves as a hotplug-enabled module and let the PCI
+ * subsystem discover our adapters.
+ */
+static int __init
+megaraid_init(void)
+{
+	int	rval;
+
+	// Announce the driver version
+	con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
+		MEGARAID_EXT_VERSION));
+
+	// check validity of module parameters
+	if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mailbox: max commands per lun reset to %d\n",
+			MBOX_MAX_SCSI_CMDS));
+
+		megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
+	}
+
+
+	// register as a PCI hot-plug driver module
+	rval = pci_register_driver(&megaraid_pci_driver);
+	if (rval < 0) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: could not register hotplug support.\n"));
+	}
+
+	return rval;
+}
+
+
+/**
+ * megaraid_exit - driver unload entry point
+ *
+ * We simply unwrap the megaraid_init routine here.
+ */
+static void __exit
+megaraid_exit(void)
+{
+	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
+
+	// unregister as PCI hotplug driver
+	pci_unregister_driver(&megaraid_pci_driver);
+
+	return;
+}
+
+
+/**
+ * megaraid_probe_one - PCI hotplug entry point
+ * @pdev	: handle to this controller's PCI configuration space
+ * @id		: pci device id of the class of controllers
+ *
+ * This routine should be called whenever a new adapter is detected by the
+ * PCI hotplug subsystem.
+ */
+static int
+megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	adapter_t	*adapter;
+
+
+	// detected a new controller
+	con_log(CL_ANN, (KERN_INFO
+		"megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+		pdev->vendor, pdev->device, pdev->subsystem_vendor,
+		pdev->subsystem_device));
+
+	con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
+		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
+
+	if (pci_enable_device(pdev)) {
+		con_log(CL_ANN, (KERN_WARNING
+				"megaraid: pci_enable_device failed\n"));
+
+		return -ENODEV;
+	}
+
+	// Enable bus-mastering on this controller
+	pci_set_master(pdev);
+
+	// Allocate the per driver initialization structure
+	adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
+
+	if (adapter == NULL) {
+		con_log(CL_ANN, (KERN_WARNING
+		"megaraid: out of memory, %s %d.\n", __func__, __LINE__));
+
+		goto out_probe_one;
+	}
+
+
+	// set up PCI related soft state and other pre-known parameters
+	adapter->unique_id	= pdev->bus->number << 8 | pdev->devfn;
+	adapter->irq		= pdev->irq;
+	adapter->pdev		= pdev;
+
+	atomic_set(&adapter->being_detached, 0);
+
+	// Setup the default DMA mask. This would be changed later on
+	// depending on hardware capabilities
+	if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+
+		goto out_free_adapter;
+	}
+
+
+	// Initialize the synchronization lock for kernel and LLD
+	spin_lock_init(&adapter->lock);
+
+	// Initialize the command queues: the list of free SCBs and the list
+	// of pending SCBs.
+	INIT_LIST_HEAD(&adapter->kscb_pool);
+	spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
+
+	INIT_LIST_HEAD(&adapter->pend_list);
+	spin_lock_init(PENDING_LIST_LOCK(adapter));
+
+	INIT_LIST_HEAD(&adapter->completed_list);
+	spin_lock_init(COMPLETED_LIST_LOCK(adapter));
+
+
+	// Start the mailbox based controller
+	if (megaraid_init_mbox(adapter) != 0) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: maibox adapter did not initialize\n"));
+
+		goto out_free_adapter;
+	}
+
+	// Register with LSI Common Management Module
+	if (megaraid_cmm_register(adapter) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING
+		"megaraid: could not register with management module\n"));
+
+		goto out_fini_mbox;
+	}
+
+	// setup adapter handle in PCI soft state
+	pci_set_drvdata(pdev, adapter);
+
+	// attach with scsi mid-layer
+	if (megaraid_io_attach(adapter) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
+
+		goto out_cmm_unreg;
+	}
+
+	return 0;
+
+out_cmm_unreg:
+	megaraid_cmm_unregister(adapter);
+out_fini_mbox:
+	megaraid_fini_mbox(adapter);
+out_free_adapter:
+	kfree(adapter);
+out_probe_one:
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+
+
+/**
+ * megaraid_detach_one - release framework resources and call LLD release routine
+ * @pdev	: handle for our PCI configuration space
+ *
+ * This routine is called during driver unload. We free all the allocated
+ * resources and call the corresponding LLD so that it can also release all
+ * its resources.
+ *
+ * This routine is also called from the PCI hotplug system.
+ */
+static void
+megaraid_detach_one(struct pci_dev *pdev)
+{
+	adapter_t		*adapter;
+	struct Scsi_Host	*host;
+
+
+	// Start a rollback on this adapter
+	adapter = pci_get_drvdata(pdev);
+
+	if (!adapter) {
+		con_log(CL_ANN, (KERN_CRIT
+		"megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+			pdev->vendor, pdev->device, pdev->subsystem_vendor,
+			pdev->subsystem_device));
+
+		return;
+	}
+	else {
+		con_log(CL_ANN, (KERN_NOTICE
+		"megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
+			pdev->vendor, pdev->device, pdev->subsystem_vendor,
+			pdev->subsystem_device));
+	}
+
+
+	host = adapter->host;
+
+	// do not allow any more requests from the management module for this
+	// adapter.
+	// FIXME: How do we account for the request which might still be
+	// pending with us?
+	atomic_set(&adapter->being_detached, 1);
+
+	// detach from the IO sub-system
+	megaraid_io_detach(adapter);
+
+	// Unregister from common management module
+	//
+	// FIXME: this must return success or failure depending on whether a
+	// command is still pending with the LLD.
+	megaraid_cmm_unregister(adapter);
+
+	// finalize the mailbox based controller and release all resources
+	megaraid_fini_mbox(adapter);
+
+	kfree(adapter);
+
+	scsi_host_put(host);
+
+	pci_disable_device(pdev);
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
+ * @pdev		: generic driver model device
+ *
+ * Shutdown notification; flush the adapter cache.
+ */
+static void
+megaraid_mbox_shutdown(struct pci_dev *pdev)
+{
+	adapter_t		*adapter = pci_get_drvdata(pdev);
+	static int		counter;
+
+	if (!adapter) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: null device in shutdown\n"));
+		return;
+	}
+
+	// flush caches now
+	con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
+		counter++));
+
+	megaraid_mbox_flush_cache(adapter);
+
+	con_log(CL_ANN, ("done\n"));
+}
+
+
+/**
+ * megaraid_io_attach - attach a device with the IO subsystem
+ * @adapter		: controller's soft state
+ *
+ * Attach this device with the IO subsystem.
+ */
+static int
+megaraid_io_attach(adapter_t *adapter)
+{
+	struct Scsi_Host	*host;
+
+	// Allocate the SCSI host; the 8 bytes of private hostdata hold the
+	// adapter back-pointer set up via SCSIHOST2ADAP() below
+	host = scsi_host_alloc(&megaraid_template_g, 8);
+	if (!host) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mbox: scsi_register failed\n"));
+
+		return -1;
+	}
+
+	SCSIHOST2ADAP(host)	= (caddr_t)adapter;
+	adapter->host		= host;
+
+	host->irq		= adapter->irq;
+	host->unique_id		= adapter->unique_id;
+	host->can_queue		= adapter->max_cmds;
+	host->this_id		= adapter->init_id;
+	host->sg_tablesize	= adapter->sglen;
+	host->max_sectors	= adapter->max_sectors;
+	host->cmd_per_lun	= adapter->cmd_per_lun;
+	host->max_channel	= adapter->max_channel;
+	host->max_id		= adapter->max_target;
+	host->max_lun		= adapter->max_lun;
+
+
+	// notify mid-layer about the new controller
+	if (scsi_add_host(host, &adapter->pdev->dev)) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mbox: scsi_add_host failed\n"));
+
+		scsi_host_put(host);
+
+		return -1;
+	}
+
+	scsi_scan_host(host);
+
+	return 0;
+}
+
+
+/**
+ * megaraid_io_detach - detach a device from the IO subsystem
+ * @adapter		: controller's soft state
+ *
+ * Detach this device from the IO subsystem.
+ */
+static void
+megaraid_io_detach(adapter_t *adapter)
+{
+	struct Scsi_Host	*host;
+
+	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
+
+	host = adapter->host;
+
+	scsi_remove_host(host);
+
+	return;
+}
+
+
+/*
+ * START: Mailbox Low Level Driver
+ *
+ * This section is specific to the single mailbox based controllers
+ */
+
+/**
+ * megaraid_init_mbox - initialize controller
+ * @adapter		: our soft state
+ *
+ * - Allocate 16-byte aligned mailbox memory for firmware handshake
+ * - Allocate controller's memory resources
+ * - Find out all initialization data
+ * - Allocate memory required for all the commands
+ * - Use internal library of FW routines, build up complete soft state
+ */
+static int
+megaraid_init_mbox(adapter_t *adapter)
+{
+	struct pci_dev		*pdev;
+	mraid_device_t		*raid_dev;
+	int			i;
+	uint32_t		magic64;
+
+
+	adapter->ito	= MBOX_TIMEOUT;
+	pdev		= adapter->pdev;
+
+	/*
+	 * Allocate and initialize the init data structure for mailbox
+	 * controllers
+	 */
+	raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
+	if (raid_dev == NULL) return -1;
+
+
+	/*
+	 * Attach the adapter soft state to raid device soft state
+	 */
+	adapter->raid_device	= (caddr_t)raid_dev;
+	raid_dev->fast_load	= megaraid_fast_load;
+
+
+	// our baseport
+	raid_dev->baseport = pci_resource_start(pdev, 0);
+
+	if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING
+				"megaraid: mem region busy\n"));
+
+		goto out_free_raid_dev;
+	}
+
+	raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
+
+	if (!raid_dev->baseaddr) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: could not map hba memory\n") );
+
+		goto out_release_regions;
+	}
+
+	/* initialize the mutual exclusion lock for the mailbox */
+	spin_lock_init(&raid_dev->mailbox_lock);
+
+	/* allocate memory required for commands */
+	if (megaraid_alloc_cmd_packets(adapter) != 0)
+		goto out_iounmap;
+
+	/*
+	 * Issue SYNC cmd to flush the pending cmds in the adapter
+	 * and initialize its internal state
+	 */
+
+	if (megaraid_mbox_fire_sync_cmd(adapter))
+		con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
+
+	/*
+	 * Setup the rest of the soft state using the library of
+	 * FW routines
+	 */
+
+	/* request IRQ and register the interrupt service routine */
+	if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
+		adapter)) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: Couldn't register IRQ %d!\n", adapter->irq));
+		goto out_alloc_cmds;
+
+	}
+
+	// Product info
+	if (megaraid_mbox_product_info(adapter) != 0)
+		goto out_free_irq;
+
+	// Do we support extended CDBs
+	adapter->max_cdb_sz = 10;
+	if (megaraid_mbox_extended_cdb(adapter) == 0) {
+		adapter->max_cdb_sz = 16;
+	}
+
+	/*
+	 * Do we support cluster environment, if we do, what is the initiator
+	 * id.
+	 * NOTE: In a non-cluster aware firmware environment, the LLD should
+	 * return 7 as initiator id.
+	 */
+	adapter->ha		= 0;
+	adapter->init_id	= -1;
+	if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
+		adapter->ha = 1;
+	}
+
+	/*
+	 * Prepare the device ids array to have the mapping between the kernel
+	 * device address and megaraid device address.
+	 * We export the physical devices on their actual addresses. The
+	 * logical drives are exported on a virtual SCSI channel
+	 */
+	megaraid_mbox_setup_device_map(adapter);
+
+	// If the firmware supports random deletion, update the device id map
+	if (megaraid_mbox_support_random_del(adapter)) {
+
+		// Change the logical drive numbers in the device_ids array;
+		// one slot in device_ids is reserved for the target id, which
+		// is why "<=" is used below
+		for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
+			adapter->device_ids[adapter->max_channel][i] += 0x80;
+		}
+		adapter->device_ids[adapter->max_channel][adapter->init_id] =
+			0xFF;
+
+		raid_dev->random_del_supported = 1;
+	}
+
+	/*
+	 * find out the maximum number of scatter-gather elements supported by
+	 * this firmware
+	 */
+	adapter->sglen = megaraid_mbox_get_max_sg(adapter);
+
+	// enumerate RAID and SCSI channels so that all devices on SCSI
+	// channels can later be exported, including disk devices
+	megaraid_mbox_enum_raid_scsi(adapter);
+
+	/*
+	 * Other parameters required by upper layer
+	 *
+	 * maximum number of sectors per IO command
+	 */
+	adapter->max_sectors = megaraid_max_sectors;
+
+	/*
+	 * number of queued commands per LUN.
+	 */
+	adapter->cmd_per_lun = megaraid_cmd_per_lun;
+
+	/*
+	 * Allocate resources required to issue FW calls, when sysfs is
+	 * accessed
+	 */
+	if (megaraid_sysfs_alloc_resources(adapter) != 0)
+		goto out_free_irq;
+
+	// Set the DMA mask to 64-bit for controllers that are capable of DMA
+	// in this range; fall back to a 32-bit mask if it cannot be set
+	pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
+
+	if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+		((adapter->pdev->subsystem_device !=
+		PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
+		(adapter->pdev->subsystem_device !=
+		PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+		adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+		if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid: DMA mask for 64-bit failed\n"));
+
+			if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+				con_log(CL_ANN, (KERN_WARNING
+					"megaraid: 32-bit DMA mask failed\n"));
+				goto out_free_sysfs_res;
+			}
+		}
+	}
+
+	// setup tasklet for DPC
+	tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
+			(unsigned long)adapter);
+
+	con_log(CL_DLEVEL1, (KERN_INFO
+		"megaraid mbox hba successfully initialized\n"));
+
+	return 0;
+
+out_free_sysfs_res:
+	megaraid_sysfs_free_resources(adapter);
+out_free_irq:
+	free_irq(adapter->irq, adapter);
+out_alloc_cmds:
+	megaraid_free_cmd_packets(adapter);
+out_iounmap:
+	iounmap(raid_dev->baseaddr);
+out_release_regions:
+	pci_release_regions(pdev);
+out_free_raid_dev:
+	kfree(raid_dev);
+
+	return -1;
+}
+
+
+/**
+ * megaraid_fini_mbox - undo controller initialization
+ * @adapter		: our soft state
+ */
+static void
+megaraid_fini_mbox(adapter_t *adapter)
+{
+	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+	// flush all caches
+	megaraid_mbox_flush_cache(adapter);
+
+	tasklet_kill(&adapter->dpc_h);
+
+	megaraid_sysfs_free_resources(adapter);
+
+	megaraid_free_cmd_packets(adapter);
+
+	free_irq(adapter->irq, adapter);
+
+	iounmap(raid_dev->baseaddr);
+
+	pci_release_regions(adapter->pdev);
+
+	kfree(raid_dev);
+
+	return;
+}
+
+
+/**
+ * megaraid_alloc_cmd_packets - allocate shared mailbox
+ * @adapter		: soft state of the raid controller
+ *
 * Allocate and align the shared mailbox. This mailbox is used to issue
+ * all the commands. For IO based controllers, the mailbox is also registered
+ * with the FW. Allocate memory for all commands as well.
+ * This is our big allocator.
+ */
+static int
+megaraid_alloc_cmd_packets(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	struct pci_dev		*pdev;
+	unsigned long		align;
+	scb_t			*scb;
+	mbox_ccb_t		*ccb;
+	struct mraid_pci_blk	*epthru_pci_blk;
+	struct mraid_pci_blk	*sg_pci_blk;
+	struct mraid_pci_blk	*mbox_pci_blk;
+	int			i;
+
+	pdev = adapter->pdev;
+
+	/*
+	 * Setup the mailbox
+	 * Allocate the common 16-byte aligned memory for the handshake
+	 * mailbox.
+	 */
+	raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
+						     sizeof(mbox64_t),
+						     &raid_dev->una_mbox64_dma);
+
+	if (!raid_dev->una_mbox64) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+		return -1;
+	}
+
+	/*
+	 * Align the mailbox at a 16-byte boundary
+	 */
+	raid_dev->mbox	= &raid_dev->una_mbox64->mbox32;
+
+	raid_dev->mbox	= (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
+				(~0UL ^ 0xFUL));
+
+	raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
+
+	align = ((void *)raid_dev->mbox -
+			((void *)&raid_dev->una_mbox64->mbox32));
+
+	raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
+			align;
+
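+	/*
+	 * Layout note: mbox64_t carries an 8-byte address extension ahead of
+	 * the 32-bit mailbox, so once the 32-bit mailbox is rounded up to a
+	 * 16-byte boundary, the 64-bit view simply starts 8 bytes before it;
+	 * mbox_dma mirrors the same offset arithmetic on the DMA address.
+	 */
+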
+	// Allocate memory for commands issued internally
+	adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
+					      &adapter->ibuf_dma_h);
+	if (!adapter->ibuf) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+
+		goto out_free_common_mbox;
+	}
+
+	// Allocate memory for our SCSI Command Blocks and their associated
+	// memory
+
+	/*
+	 * Allocate memory for the base list of SCBs. Memory for the CCBs and
+	 * the components embedded in each CCB is allocated later, and the
+	 * pointers in each SCB are then pointed at those components.
+	 * NOTE: The code to allocate SCBs will be duplicated in all the LLDs,
+	 * since the calling routine does not yet know the number of available
+	 * commands.
+	 */
+	adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+	if (adapter->kscb_list == NULL) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+		goto out_free_ibuf;
+	}
+
+	// memory allocation for our command packets
+	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+		goto out_free_scb_list;
+	}
+
+	// Adjust the scb pointers and link in the free pool
+	epthru_pci_blk	= raid_dev->epthru_pool;
+	sg_pci_blk	= raid_dev->sg_pool;
+	mbox_pci_blk	= raid_dev->mbox_pool;
+
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+		scb			= adapter->kscb_list + i;
+		ccb			= raid_dev->ccb_list + i;
+
+		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
+		ccb->raw_mbox	= (uint8_t *)ccb->mbox;
+		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
+		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;
+
+		// make sure the mailbox is aligned properly
+		if (ccb->mbox_dma_h & 0x0F) {
+			con_log(CL_ANN, (KERN_CRIT
+				"megaraid mbox: not aligned on 16-bytes\n"));
+
+			goto out_teardown_dma_pools;
+		}
+
+		ccb->epthru		= (mraid_epassthru_t *)
+						epthru_pci_blk[i].vaddr;
+		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;
+		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;
+		ccb->pthru_dma_h	= ccb->epthru_dma_h;
+
+
+		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;
+		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;
+		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;
+
+		scb->ccb		= (caddr_t)ccb;
+		scb->gp			= 0;
+
+		scb->sno		= i;	// command index
+
+		scb->scp		= NULL;
+		scb->state		= SCB_FREE;
+		scb->dma_direction	= PCI_DMA_NONE;
+		scb->dma_type		= MRAID_DMA_NONE;
+		scb->dev_channel	= -1;
+		scb->dev_target		= -1;
+
+		// put scb in the free pool
+		list_add_tail(&scb->list, &adapter->kscb_pool);
+	}
+
+	return 0;
+
+out_teardown_dma_pools:
+	megaraid_mbox_teardown_dma_pools(adapter);
+out_free_scb_list:
+	kfree(adapter->kscb_list);
+out_free_ibuf:
+	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+		adapter->ibuf_dma_h);
+out_free_common_mbox:
+	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+
+	return -1;
+}
+
+
+/**
+ * megaraid_free_cmd_packets - free memory
+ * @adapter		: soft state of the raid controller
+ *
+ * Release memory resources allocated for commands.
+ */
+static void
+megaraid_free_cmd_packets(adapter_t *adapter)
+{
+	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
+
+	megaraid_mbox_teardown_dma_pools(adapter);
+
+	kfree(adapter->kscb_list);
+
+	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+		(void *)adapter->ibuf, adapter->ibuf_dma_h);
+
+	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
+	return;
+}
+
+
+/**
+ * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
+ * @adapter		: HBA soft state
+ *
+ * Setup the dma pools for mailbox, passthru and extended passthru structures,
+ * and scatter-gather lists.
+ */
+static int
+megaraid_mbox_setup_dma_pools(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	struct mraid_pci_blk	*epthru_pci_blk;
+	struct mraid_pci_blk	*sg_pci_blk;
+	struct mraid_pci_blk	*mbox_pci_blk;
+	int			i;
+
+
+
+	// Allocate memory for 16-byte aligned mailboxes; the extra 16 bytes
+	// of slack let each CCB place its 32-bit mailbox at vaddr + 16 with
+	// the 64-bit extension at vaddr + 8 (see megaraid_alloc_cmd_packets)
+	raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool",
+						&adapter->pdev->dev,
+						sizeof(mbox64_t) + 16,
+						16, 0);
+
+	if (raid_dev->mbox_pool_handle == NULL) {
+		goto fail_setup_dma_pool;
+	}
+
+	mbox_pci_blk = raid_dev->mbox_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+		mbox_pci_blk[i].vaddr = dma_pool_alloc(
+						raid_dev->mbox_pool_handle,
+						GFP_KERNEL,
+						&mbox_pci_blk[i].dma_addr);
+		if (!mbox_pci_blk[i].vaddr) {
+			goto fail_setup_dma_pool;
+		}
+	}
+
+	/*
+	 * Allocate memory for each embedded passthru structure pointer.
+	 * Request a 128-byte aligned structure for each passthru command
+	 * structure.
+	 * Since passthru and extended passthru commands are exclusive, they
+	 * share a common memory pool. Passthru structures piggyback on memory
+	 * allocated to extended passthru, since passthru is the smaller of
+	 * the two
+	 */
+	raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
+			&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
+
+	if (raid_dev->epthru_pool_handle == NULL) {
+		goto fail_setup_dma_pool;
+	}
+
+	epthru_pci_blk = raid_dev->epthru_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+		epthru_pci_blk[i].vaddr = dma_pool_alloc(
+						raid_dev->epthru_pool_handle,
+						GFP_KERNEL,
+						&epthru_pci_blk[i].dma_addr);
+		if (!epthru_pci_blk[i].vaddr) {
+			goto fail_setup_dma_pool;
+		}
+	}
+
+
+	// Allocate memory for each scatter-gather list. Request for 512 bytes
+	// alignment for each sg list
+	raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg",
+					&adapter->pdev->dev,
+					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
+					512, 0);
+
+	if (raid_dev->sg_pool_handle == NULL) {
+		goto fail_setup_dma_pool;
+	}
+
+	sg_pci_blk = raid_dev->sg_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+		sg_pci_blk[i].vaddr = dma_pool_alloc(
+						raid_dev->sg_pool_handle,
+						GFP_KERNEL,
+						&sg_pci_blk[i].dma_addr);
+		if (!sg_pci_blk[i].vaddr) {
+			goto fail_setup_dma_pool;
+		}
+	}
+
+	return 0;
+
+fail_setup_dma_pool:
+	megaraid_mbox_teardown_dma_pools(adapter);
+	return -1;
+}
+
+
+/**
+ * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
+ * @adapter		: HBA soft state
+ *
+ * Teardown the dma pool for mailbox, passthru and extended passthru
+ * structures, and scatter-gather lists.
+ */
+static void
+megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	struct mraid_pci_blk	*epthru_pci_blk;
+	struct mraid_pci_blk	*sg_pci_blk;
+	struct mraid_pci_blk	*mbox_pci_blk;
+	int			i;
+
+
+	sg_pci_blk = raid_dev->sg_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
+		dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
+			sg_pci_blk[i].dma_addr);
+	}
+	if (raid_dev->sg_pool_handle)
+		dma_pool_destroy(raid_dev->sg_pool_handle);
+
+
+	epthru_pci_blk = raid_dev->epthru_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
+		dma_pool_free(raid_dev->epthru_pool_handle,
+			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
+	}
+	if (raid_dev->epthru_pool_handle)
+		dma_pool_destroy(raid_dev->epthru_pool_handle);
+
+
+	mbox_pci_blk = raid_dev->mbox_pool;
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
+		dma_pool_free(raid_dev->mbox_pool_handle,
+			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
+	}
+	if (raid_dev->mbox_pool_handle)
+		dma_pool_destroy(raid_dev->mbox_pool_handle);
+
+	return;
+}
+
+
+/**
+ * megaraid_alloc_scb - detach and return a scb from the free list
+ * @adapter	: controller's soft state
+ * @scp		: pointer to the scsi command to be executed
+ *
+ * Return the scb from the head of the free list. %NULL if there are none
+ * available.
+ */
+static scb_t *
+megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
+{
+	struct list_head	*head = &adapter->kscb_pool;
+	scb_t			*scb = NULL;
+	unsigned long		flags;
+
+	// detach scb from free pool
+	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+	if (list_empty(head)) {
+		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+		return NULL;
+	}
+
+	scb = list_entry(head->next, scb_t, list);
+	list_del_init(&scb->list);
+
+	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+	scb->state	= SCB_ACTIVE;
+	scb->scp	= scp;
+	scb->dma_type	= MRAID_DMA_NONE;
+
+	return scb;
+}
+
+
+/**
+ * megaraid_dealloc_scb - return the scb to the free pool
+ * @adapter	: controller's soft state
+ * @scb		: scb to be freed
+ *
+ * Return the scb back to the free list of scbs. The caller must 'flush' the
 * SCB before calling us, e.g., by performing pci_unmap and/or pci_sync.
+ * NOTE NOTE: Make sure the scb is not on any list before calling this
+ * routine.
+ */
+static inline void
+megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
+{
+	unsigned long		flags;
+
+	// put scb in the free pool
+	scb->state	= SCB_FREE;
+	scb->scp	= NULL;
+	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+	list_add(&scb->list, &adapter->kscb_pool);
+
+	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_mksgl - make the scatter-gather list
+ * @adapter	: controller's soft state
+ * @scb		: scsi control block
+ *
+ * Prepare the scatter-gather list.
+ */
+static int
+megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
+{
+	struct scatterlist	*sgl;
+	mbox_ccb_t		*ccb;
+	struct scsi_cmnd	*scp;
+	int			sgcnt;
+	int			i;
+
+
+	scp	= scb->scp;
+	ccb	= (mbox_ccb_t *)scb->ccb;
+
+	sgcnt = scsi_dma_map(scp);
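+	// the mid-layer honours host->sg_tablesize (set from adapter->sglen
+	// in megaraid_io_attach), so a longer mapping here is a driver bug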
+	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
+	// no mapping required if no data to be transferred
+	if (!sgcnt)
+		return 0;
+
+	scb->dma_type = MRAID_DMA_WSG;
+
+	scsi_for_each_sg(scp, sgl, sgcnt, i) {
+		ccb->sgl64[i].address	= sg_dma_address(sgl);
+		ccb->sgl64[i].length	= sg_dma_len(sgl);
+	}
+
+	// Return count of SG nodes
+	return sgcnt;
+}
+
+
+/**
+ * mbox_post_cmd - issue a mailbox command
+ * @adapter	: controller's soft state
+ * @scb		: command to be issued
+ *
+ * Post the command to the controller if mailbox is available.
+ */
+static int
+mbox_post_cmd(adapter_t *adapter, scb_t *scb)
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox64_t	*mbox64;
+	mbox_t		*mbox;
+	mbox_ccb_t	*ccb;
+	unsigned long	flags;
+	unsigned int	i = 0;
+
+
+	ccb	= (mbox_ccb_t *)scb->ccb;
+	mbox	= raid_dev->mbox;
+	mbox64	= raid_dev->mbox64;
+
+	/*
+	 * Check for busy mailbox. If it is, return failure - the caller
+	 * should retry later.
+	 */
+	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+
+	if (unlikely(mbox->busy)) {
+		do {
+			udelay(1);
+			i++;
+			rmb();
+		} while(mbox->busy && (i < max_mbox_busy_wait));
+
+		if (mbox->busy) {
+
+			spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+			return -1;
+		}
+	}
+
+
+	// Copy this command's mailbox data into "adapter's" mailbox
+	memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
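+	// (the first 22 bytes carry the 8-byte 64-bit address extension plus
+	// the command portion of the 32-bit mailbox; the status/ack fields
+	// that follow are owned by the firmware and are left untouched)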
+	mbox->cmdid = scb->sno;
+
+	adapter->outstanding_cmds++;
+
+	if (scb->dma_direction == PCI_DMA_TODEVICE)
+		pci_dma_sync_sg_for_device(adapter->pdev,
+					   scsi_sglist(scb->scp),
+					   scsi_sg_count(scb->scp),
+					   PCI_DMA_TODEVICE);
+
+	mbox->busy	= 1;	// Set busy
+	mbox->poll	= 0;
+	mbox->ack	= 0;
+	wmb();
+
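+	/* the low bit of the doorbell value tells the FW that a new command
+	 * is ready at mbox_dma */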
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+	return 0;
+}
+
+
+/**
 * megaraid_queue_command_lck - generic queue entry point for all LLDs
 * @scp		: pointer to the scsi command to be executed
 * @done	: callback routine to be called after the cmd has been completed
+ *
+ * Queue entry point for mailbox based controllers.
+ */
+static int
+megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+{
+	adapter_t	*adapter;
+	scb_t		*scb;
+	int		if_busy;
+
+	adapter		= SCP2ADAPTER(scp);
+	scp->scsi_done	= done;
+	scp->result	= 0;
+
+	/*
+	 * Allocate and build an SCB request.
+	 * The if_busy flag is set if megaraid_mbox_build_cmd() could not
+	 * allocate an scb, in which case we return a non-zero status.
+	 * NOTE: scb can also be NULL when certain commands complete
+	 * successfully without going to the FW, e.g., MODE_SENSE and
+	 * TEST_UNIT_READY; we return 0 then and do the callback right away.
+	 */
+	if_busy	= 0;
+	scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
+	if (!scb) {	// command already completed
+		done(scp);
+		return 0;
+	}
+
+	megaraid_mbox_runpendq(adapter, scb);
+	return if_busy;
+}
+
+static DEF_SCSI_QCMD(megaraid_queue_command)
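+// DEF_SCSI_QCMD() generates megaraid_queue_command(), a wrapper that takes
+// the SCSI host lock and then invokes megaraid_queue_command_lck() above.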
+
+/**
+ * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
+ * @adapter	: controller's soft state
+ * @scp		: mid-layer scsi command pointer
+ * @busy	: set if request could not be completed because of lack of
+ *		resources
+ *
 * Convert the command issued by the mid-layer to the format understood by
 * the megaraid firmware. We also complete certain commands without sending
 * them to the firmware.
+ */
+static scb_t *
+megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
+{
+	mraid_device_t		*rdev = ADAP2RAIDDEV(adapter);
+	int			channel;
+	int			target;
+	int			islogical;
+	mbox_ccb_t		*ccb;
+	mraid_passthru_t	*pthru;
+	mbox64_t		*mbox64;
+	mbox_t			*mbox;
+	scb_t			*scb;
+	char			skip[] = "skipping";
+	char			scan[] = "scanning";
+	char			*ss;
+
+
+	/*
+	 * Get the appropriate device map for the device this command is
+	 * intended for
+	 */
+	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
+
+	/*
+	 * Logical drive commands
+	 */
+	if (islogical) {
+		switch (scp->cmnd[0]) {
+		case TEST_UNIT_READY:
+			/*
+			 * Do we support clustering and is the support enabled
+			 * If no, return success always
+			 */
+			if (!adapter->ha) {
+				scp->result = (DID_OK << 16);
+				return NULL;
+			}
+
+			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+				scp->result = (DID_ERROR << 16);
+				*busy = 1;
+				return NULL;
+			}
+
+			scb->dma_direction	= scp->sc_data_direction;
+			scb->dev_channel	= 0xFF;
+			scb->dev_target		= target;
+			ccb			= (mbox_ccb_t *)scb->ccb;
+
+			/*
+			 * The command id will be provided by the command
+			 * issuance routine
+			 */
+			ccb->raw_mbox[0]	= CLUSTER_CMD;
+			ccb->raw_mbox[2]	= RESERVATION_STATUS;
+			ccb->raw_mbox[3]	= target;
+
+			return scb;
+
+		case MODE_SENSE:
+		{
+			struct scatterlist	*sgl;
+			caddr_t			vaddr;
+
+			sgl = scsi_sglist(scp);
+			if (sg_page(sgl)) {
+				vaddr = (caddr_t) sg_virt(&sgl[0]);
+
+				memset(vaddr, 0, scp->cmnd[4]);
+			}
+			else {
+				con_log(CL_ANN, (KERN_WARNING
+						 "megaraid mailbox: invalid sg:%d\n",
+						 __LINE__));
+			}
+		}
+		scp->result = (DID_OK << 16);
+		return NULL;
+
+		case INQUIRY:
+			/*
+			 * Display the channel scan for logical drives
+			 * Do not display scan for a channel if already done.
+			 */
+			if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+				con_log(CL_ANN, (KERN_INFO
+					"scsi[%d]: scanning scsi channel %d",
+					adapter->host->host_no,
+					SCP2CHANNEL(scp)));
+
+				con_log(CL_ANN, (
+					" [virtual] for logical drives\n"));
+
+				rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+			}
+
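+			/*
+			 * Reject EVPD inquiries for logical drives with
+			 * ILLEGAL REQUEST / INVALID FIELD IN CDB sense data
+			 * instead of passing them to the FW.
+			 */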
+			if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
+				scp->sense_buffer[0] = 0x70;
+				scp->sense_buffer[2] = ILLEGAL_REQUEST;
+				scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+				scp->result = CHECK_CONDITION << 1;
+				return NULL;
+			}
+
+			/* Fall through */
+
+		case READ_CAPACITY:
+			/*
+			 * Do not allow LUN > 0 for logical drives and
+			 * requests for more than 40 logical drives
+			 */
+			if (SCP2LUN(scp)) {
+				scp->result = (DID_BAD_TARGET << 16);
+				return NULL;
+			}
+			if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
+				scp->result = (DID_BAD_TARGET << 16);
+				return NULL;
+			}
+
+
+			/* Allocate a SCB and initialize passthru */
+			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+				scp->result = (DID_ERROR << 16);
+				*busy = 1;
+				return NULL;
+			}
+
+			ccb			= (mbox_ccb_t *)scb->ccb;
+			scb->dev_channel	= 0xFF;
+			scb->dev_target		= target;
+			pthru			= ccb->pthru;
+			mbox			= ccb->mbox;
+			mbox64			= ccb->mbox64;
+
+			pthru->timeout		= 0;
+			pthru->ars		= 1;
+			pthru->reqsenselen	= 14;
+			pthru->islogical	= 1;
+			pthru->logdrv		= target;
+			pthru->cdblen		= scp->cmd_len;
+			memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+			mbox->cmd		= MBOXCMD_PASSTHRU64;
+			scb->dma_direction	= scp->sc_data_direction;
+
+			pthru->dataxferlen	= scsi_bufflen(scp);
+			pthru->dataxferaddr	= ccb->sgl_dma_h;
+			pthru->numsge		= megaraid_mbox_mksgl(adapter,
+							scb);
+
+			mbox->xferaddr		= 0xFFFFFFFF;
+			mbox64->xferaddr_lo	= (uint32_t )ccb->pthru_dma_h;
+			mbox64->xferaddr_hi	= 0;
+
+			return scb;
+
+		case READ_6:
+		case WRITE_6:
+		case READ_10:
+		case WRITE_10:
+		case READ_12:
+		case WRITE_12:
+
+			/*
+			 * Allocate a SCB and initialize mailbox
+			 */
+			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+				scp->result = (DID_ERROR << 16);
+				*busy = 1;
+				return NULL;
+			}
+			ccb			= (mbox_ccb_t *)scb->ccb;
+			scb->dev_channel	= 0xFF;
+			scb->dev_target		= target;
+			mbox			= ccb->mbox;
+			mbox64			= ccb->mbox64;
+			mbox->logdrv		= target;
+
+			/*
+			 * A little HACK: 2nd bit is zero for all scsi read
+			 * commands and is set for all scsi write commands
+			 */
+			mbox->cmd = (scp->cmnd[0] & 0x02) ?  MBOXCMD_LWRITE64:
+					MBOXCMD_LREAD64 ;
+
+			/*
+			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
+			 */
+			if (scp->cmd_len == 6) {
+				mbox->numsectors = (uint32_t)scp->cmnd[4];
+				mbox->lba =
+					((uint32_t)scp->cmnd[1] << 16)	|
+					((uint32_t)scp->cmnd[2] << 8)	|
+					(uint32_t)scp->cmnd[3];
+
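+				/* READ(6)/WRITE(6) CDBs carry only a 21-bit
+				 * LBA */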
+				mbox->lba &= 0x1FFFFF;
+			}
+
+			/*
+			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
+			 */
+			else if (scp->cmd_len == 10) {
+				mbox->numsectors =
+					(uint32_t)scp->cmnd[8] |
+					((uint32_t)scp->cmnd[7] << 8);
+				mbox->lba =
+					((uint32_t)scp->cmnd[2] << 24) |
+					((uint32_t)scp->cmnd[3] << 16) |
+					((uint32_t)scp->cmnd[4] << 8) |
+					(uint32_t)scp->cmnd[5];
+			}
+
+			/*
+			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+			 */
+			else if (scp->cmd_len == 12) {
+				mbox->lba =
+					((uint32_t)scp->cmnd[2] << 24) |
+					((uint32_t)scp->cmnd[3] << 16) |
+					((uint32_t)scp->cmnd[4] << 8) |
+					(uint32_t)scp->cmnd[5];
+
+				mbox->numsectors =
+					((uint32_t)scp->cmnd[6] << 24) |
+					((uint32_t)scp->cmnd[7] << 16) |
+					((uint32_t)scp->cmnd[8] << 8) |
+					(uint32_t)scp->cmnd[9];
+			}
+			else {
+				con_log(CL_ANN, (KERN_WARNING
+					"megaraid: unsupported CDB length\n"));
+
+				megaraid_dealloc_scb(adapter, scb);
+
+				scp->result = (DID_ERROR << 16);
+				return NULL;
+			}
+
+			scb->dma_direction = scp->sc_data_direction;
+
+			// Calculate Scatter-Gather info
+			mbox64->xferaddr_lo	= (uint32_t )ccb->sgl_dma_h;
+			mbox->numsge		= megaraid_mbox_mksgl(adapter,
+							scb);
+			mbox->xferaddr		= 0xFFFFFFFF;
+			mbox64->xferaddr_hi	= 0;
+
+			return scb;
+
+		case RESERVE:
+		case RELEASE:
+			/*
+			 * Do we support clustering and is the support enabled
+			 */
+			if (!adapter->ha) {
+				scp->result = (DID_BAD_TARGET << 16);
+				return NULL;
+			}
+
+			/*
+			 * Allocate a SCB and initialize mailbox
+			 */
+			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+				scp->result = (DID_ERROR << 16);
+				*busy = 1;
+				return NULL;
+			}
+
+			ccb			= (mbox_ccb_t *)scb->ccb;
+			scb->dev_channel	= 0xFF;
+			scb->dev_target		= target;
+			ccb->raw_mbox[0]	= CLUSTER_CMD;
+			ccb->raw_mbox[2]	=  (scp->cmnd[0] == RESERVE) ?
+						RESERVE_LD : RELEASE_LD;
+
+			ccb->raw_mbox[3]	= target;
+			scb->dma_direction	= scp->sc_data_direction;
+
+			return scb;
+
+		default:
+			scp->result = (DID_BAD_TARGET << 16);
+			return NULL;
+		}
+	}
+	else { // Passthru device commands
+
+		// Do not allow access to target id > 15 or LUN > 7
+		if (target > 15 || SCP2LUN(scp) > 7) {
+			scp->result = (DID_BAD_TARGET << 16);
+			return NULL;
+		}
+
+		// if the fast load option was set and the scan for the last
+		// device is over, reset the fast_load flag so that during a
+		// possible next scan, devices can be made available
+		if (rdev->fast_load && (target == 15) &&
+			(SCP2CHANNEL(scp) == adapter->max_channel -1)) {
+
+			con_log(CL_ANN, (KERN_INFO
+			"megaraid[%d]: physical device scan re-enabled\n",
+				adapter->host->host_no));
+			rdev->fast_load = 0;
+		}
+
+		/*
+		 * Display the channel scan for physical devices
+		 */
+		if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
+
+			ss = rdev->fast_load ? skip : scan;
+
+			con_log(CL_ANN, (KERN_INFO
+				"scsi[%d]: %s scsi channel %d [Phy %d]",
+				adapter->host->host_no, ss, SCP2CHANNEL(scp),
+				channel));
+
+			con_log(CL_ANN, (
+				" for non-raid devices\n"));
+
+			rdev->last_disp |= (1L << SCP2CHANNEL(scp));
+		}
+
+		// disable channel sweep if fast load option given
+		if (rdev->fast_load) {
+			scp->result = (DID_BAD_TARGET << 16);
+			return NULL;
+		}
+
+		// Allocate a SCB and initialize passthru
+		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
+			scp->result = (DID_ERROR << 16);
+			*busy = 1;
+			return NULL;
+		}
+
+		ccb			= (mbox_ccb_t *)scb->ccb;
+		scb->dev_channel	= channel;
+		scb->dev_target		= target;
+		scb->dma_direction	= scp->sc_data_direction;
+		mbox			= ccb->mbox;
+		mbox64			= ccb->mbox64;
+
+		// Does this firmware support extended CDBs
+		if (adapter->max_cdb_sz == 16) {
+			mbox->cmd		= MBOXCMD_EXTPTHRU;
+
+			megaraid_mbox_prepare_epthru(adapter, scb, scp);
+
+			mbox64->xferaddr_lo	= (uint32_t)ccb->epthru_dma_h;
+			mbox64->xferaddr_hi	= 0;
+			mbox->xferaddr		= 0xFFFFFFFF;
+		}
+		else {
+			mbox->cmd = MBOXCMD_PASSTHRU64;
+
+			megaraid_mbox_prepare_pthru(adapter, scb, scp);
+
+			mbox64->xferaddr_lo	= (uint32_t)ccb->pthru_dma_h;
+			mbox64->xferaddr_hi	= 0;
+			mbox->xferaddr		= 0xFFFFFFFF;
+		}
+		return scb;
+	}
+
+	// NOT REACHED
+}
+
+
+/**
+ * megaraid_mbox_runpendq - execute commands queued in the pending queue
+ * @adapter	: controller's soft state
+ * @scb_q	: SCB to be queued in the pending list
+ *
+ * Scan the pending list for commands which are not yet issued and try to
 * post to the controller. The SCB can be a null pointer, which indicates
 * no SCB to be queued; just try to execute the ones in the pending list.
+ *
+ * NOTE: We do not actually traverse the pending list. The SCBs are plucked
+ * out from the head of the pending list. If it is successfully issued, the
+ * next SCB is at the head now.
+ */
+static void
+megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
+{
+	scb_t			*scb;
+	unsigned long		flags;
+
+	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+	if (scb_q) {
+		scb_q->state = SCB_PENDQ;
+		list_add_tail(&scb_q->list, &adapter->pend_list);
+	}
+
+	// if the adapter is not in quiescent mode, post the commands to the FW
+	if (adapter->quiescent) {
+		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+		return;
+	}
+
+	while (!list_empty(&adapter->pend_list)) {
+
+		assert_spin_locked(PENDING_LIST_LOCK(adapter));
+
+		scb = list_entry(adapter->pend_list.next, scb_t, list);
+
+		// remove the scb from the pending list and try to
+		// issue. If we are unable to issue it, put back in
+		// the pending list and return
+
+		list_del_init(&scb->list);
+
+		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+		// if mailbox was busy, return SCB back to pending
+		// list. Make sure to add at the head, since that's
+		// where it would have been removed from
+
+		scb->state = SCB_ISSUED;
+
+		if (mbox_post_cmd(adapter, scb) != 0) {
+
+			spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+
+			scb->state = SCB_PENDQ;
+
+			list_add(&scb->list, &adapter->pend_list);
+
+			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+				flags);
+
+			return;
+		}
+
+		spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+	}
+
+	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_pthru - prepare a command for physical devices
+ * @adapter	: pointer to controller's soft state
+ * @scb		: scsi control block
+ * @scp		: scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices.
+ */
+static void
+megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
+		struct scsi_cmnd *scp)
+{
+	mbox_ccb_t		*ccb;
+	mraid_passthru_t	*pthru;
+	uint8_t			channel;
+	uint8_t			target;
+
+	ccb	= (mbox_ccb_t *)scb->ccb;
+	pthru	= ccb->pthru;
+	channel	= scb->dev_channel;
+	target	= scb->dev_target;
+
+	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+	pthru->timeout		= 4;	
+	pthru->ars		= 1;
+	pthru->islogical	= 0;
+	pthru->channel		= 0;
+	pthru->target		= (channel << 4) | target;
+	pthru->logdrv		= SCP2LUN(scp);
+	pthru->reqsenselen	= 14;
+	pthru->cdblen		= scp->cmd_len;
+
+	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+	if (scsi_bufflen(scp)) {
+		pthru->dataxferlen	= scsi_bufflen(scp);
+		pthru->dataxferaddr	= ccb->sgl_dma_h;
+		pthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
+	}
+	else {
+		pthru->dataxferaddr	= 0;
+		pthru->dataxferlen	= 0;
+		pthru->numsge		= 0;
+	}
+	return;
+}
+
+
+/**
+ * megaraid_mbox_prepare_epthru - prepare a command for physical devices
+ * @adapter	: pointer to controller's soft state
+ * @scb		: scsi control block
+ * @scp		: scsi command from the mid-layer
+ *
+ * Prepare a command for the scsi physical devices. This routine prepares
+ * commands for devices which can take extended CDBs (>10 bytes).
+ */
+static void
+megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
+		struct scsi_cmnd *scp)
+{
+	mbox_ccb_t		*ccb;
+	mraid_epassthru_t	*epthru;
+	uint8_t			channel;
+	uint8_t			target;
+
+	ccb	= (mbox_ccb_t *)scb->ccb;
+	epthru	= ccb->epthru;
+	channel	= scb->dev_channel;
+	target	= scb->dev_target;
+
+	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
+	epthru->timeout		= 4;	
+	epthru->ars		= 1;
+	epthru->islogical	= 0;
+	epthru->channel		= 0;
+	epthru->target		= (channel << 4) | target;
+	epthru->logdrv		= SCP2LUN(scp);
+	epthru->reqsenselen	= 14;
+	epthru->cdblen		= scp->cmd_len;
+
+	memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
+
+	if (scsi_bufflen(scp)) {
+		epthru->dataxferlen	= scsi_bufflen(scp);
+		epthru->dataxferaddr	= ccb->sgl_dma_h;
+		epthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
+	}
+	else {
+		epthru->dataxferaddr	= 0;
+		epthru->dataxferlen	= 0;
+		epthru->numsge		= 0;
+	}
+	return;
+}
+
+
+/**
+ * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
+ * @adapter	: controller's soft state
+ *
 * Interrupt acknowledgement sequence for memory mapped HBAs. Find the
 * completed commands and put them on the completed list for later processing.
+ *
+ * Returns:	1 if the interrupt is valid, 0 otherwise
+ */
+static int
+megaraid_ack_sequence(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox_t			*mbox;
+	scb_t			*scb;
+	uint8_t			nstatus;
+	uint8_t			completed[MBOX_MAX_FIRMWARE_STATUS];
+	struct list_head	clist;
+	int			handled;
+	uint32_t		dword;
+	unsigned long		flags;
+	int			i, j;
+
+
+	mbox	= raid_dev->mbox;
+
+	// move the SCBs from the firmware completed array to our local list
+	INIT_LIST_HEAD(&clist);
+
+	// loop while the F/W has more commands for us to complete
+	handled = 0;
+	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
+	do {
+		/*
+		 * Check if a valid interrupt is pending: the FW posts the
+		 * signature 0x10001234 in the outbound doorbell. Writing it
+		 * back forces the interrupt line low.
+		 */
+		dword = RDOUTDOOR(raid_dev);
+		if (dword != 0x10001234) break;
+
+		handled = 1;
+
+		WROUTDOOR(raid_dev, 0x10001234);
+
+		nstatus = 0;
+		// wait for valid numstatus to post
+		for (i = 0; i < 0xFFFFF; i++) {
+			if (mbox->numstatus != 0xFF) {
+				nstatus = mbox->numstatus;
+				break;
+			}
+			rmb();
+		}
+		mbox->numstatus = 0xFF;
+
+		adapter->outstanding_cmds -= nstatus;
+
+		for (i = 0; i < nstatus; i++) {
+
+			// wait for valid command index to post
+			for (j = 0; j < 0xFFFFF; j++) {
+				if (mbox->completed[i] != 0xFF) break;
+				rmb();
+			}
+			completed[i]		= mbox->completed[i];
+			mbox->completed[i]	= 0xFF;
+
+			if (completed[i] == 0xFF) {
+				con_log(CL_ANN, (KERN_CRIT
+				"megaraid: command posting timed out\n"));
+
+				BUG();
+				continue;
+			}
+
+			// Get SCB associated with this command id
+			if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
+				// a cmm command
+				scb = adapter->uscb_list + (completed[i] -
+						MBOX_MAX_SCSI_CMDS);
+			}
+			else {
+				// an os command
+				scb = adapter->kscb_list + completed[i];
+			}
+
+			scb->status = mbox->status;
+			list_add_tail(&scb->list, &clist);
+		}
+
+		// Acknowledge interrupt
+		WRINDOOR(raid_dev, 0x02);
+
+	} while(1);
+
+	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
+
+
+	// put the completed commands in the completed list; the DPC will
+	// complete these commands later
+	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+	list_splice(&clist, &adapter->completed_list);
+
+	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+	// schedule the DPC if there is some work for it
+	if (handled)
+		tasklet_schedule(&adapter->dpc_h);
+
+	return handled;
+}
+
+
+/**
+ * megaraid_isr - isr for memory based mailbox based controllers
+ * @irq		: irq
+ * @devp	: pointer to our soft state
+ *
+ * Interrupt service routine for memory-mapped mailbox controllers.
+ */
+static irqreturn_t
+megaraid_isr(int irq, void *devp)
+{
+	adapter_t	*adapter = devp;
+	int		handled;
+
+	handled = megaraid_ack_sequence(adapter);
+
+	/* Loop through any pending requests */
+	if (!adapter->quiescent) {
+		megaraid_mbox_runpendq(adapter, NULL);
+	}
+
+	return IRQ_RETVAL(handled);
+}
+
+
+/**
+ * megaraid_mbox_sync_scb - sync kernel buffers
+ * @adapter	: controller's soft state
+ * @scb		: pointer to the resource packet
+ *
+ * DMA sync if required.
+ */
+static void
+megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
+{
+	mbox_ccb_t	*ccb;
+
+	ccb	= (mbox_ccb_t *)scb->ccb;
+
+	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+		pci_dma_sync_sg_for_cpu(adapter->pdev,
+					scsi_sglist(scb->scp),
+					scsi_sg_count(scb->scp),
+					PCI_DMA_FROMDEVICE);
+
+	scsi_dma_unmap(scb->scp);
+	return;
+}
+
+
+/**
+ * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
+ * @devp	: pointer to HBA soft state
+ *
+ * Pick up the commands from the completed list and send back to the owners.
+ * This is a reentrant function and does not assume any locks are held while
+ * it is being called.
+ */
+static void
+megaraid_mbox_dpc(unsigned long devp)
+{
+	adapter_t		*adapter = (adapter_t *)devp;
+	mraid_device_t		*raid_dev;
+	struct list_head	clist;
+	struct scatterlist	*sgl;
+	scb_t			*scb;
+	scb_t			*tmp;
+	struct scsi_cmnd	*scp;
+	mraid_passthru_t	*pthru;
+	mraid_epassthru_t	*epthru;
+	mbox_ccb_t		*ccb;
+	int			islogical;
+	int			pdev_index;
+	int			pdev_state;
+	mbox_t			*mbox;
+	unsigned long		flags;
+	uint8_t			c;
+	int			status;
+	uioc_t			*kioc;
+
+
+	if (!adapter) return;
+
+	raid_dev = ADAP2RAIDDEV(adapter);
+
+	// move the SCBs from the completed list to our local list
+	INIT_LIST_HEAD(&clist);
+
+	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+
+	list_splice_init(&adapter->completed_list, &clist);
+
+	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+	list_for_each_entry_safe(scb, tmp, &clist, list) {
+
+		status		= scb->status;
+		scp		= scb->scp;
+		ccb		= (mbox_ccb_t *)scb->ccb;
+		pthru		= ccb->pthru;
+		epthru		= ccb->epthru;
+		mbox		= ccb->mbox;
+
+		// Make sure f/w has completed a valid command
+		if (scb->state != SCB_ISSUED) {
+			con_log(CL_ANN, (KERN_CRIT
+			"megaraid critical err: invalid command %d:%d:%p\n",
+				scb->sno, scb->state, scp));
+			BUG();
+			continue;	// Must never happen!
+		}
+
+		// check for the management command and complete it right away
+		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+			scb->state	= SCB_FREE;
+			scb->status	= status;
+
+			// remove from local clist
+			list_del_init(&scb->list);
+
+			kioc			= (uioc_t *)scb->gp;
+			kioc->status		= 0;
+
+			megaraid_mbox_mm_done(adapter, scb);
+
+			continue;
+		}
+
+		// Was an abort issued for this command earlier
+		if (scb->state & SCB_ABORT) {
+			con_log(CL_ANN, (KERN_NOTICE
+			"megaraid: aborted cmd [%x] completed\n",
+				scb->sno));
+		}
+
+		/*
+		 * If the inquiry came from a disk drive which is not part of
+		 * any RAID array, expose it to the kernel. For this to be
+		 * enabled, the user must set the "megaraid_expose_unconf_disks"
+		 * flag to 1 on the module parameter list.
+		 * This would enable data migration off drives from other
+		 * configurations.
+		 */
+		islogical = MRAID_IS_LOGICAL(adapter, scp);
+		if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
+				&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
+
+			sgl = scsi_sglist(scp);
+			if (sg_page(sgl)) {
+				c = *(unsigned char *) sg_virt(&sgl[0]);
+			} else {
+				con_log(CL_ANN, (KERN_WARNING
+						 "megaraid mailbox: invalid sg:%d\n",
+						 __LINE__));
+				c = 0;
+			}
+
+			if ((c & 0x1F ) == TYPE_DISK) {
+				pdev_index = (scb->dev_channel * 16) +
+					scb->dev_target;
+				pdev_state =
+					raid_dev->pdrv_state[pdev_index] & 0x0F;
+
+				if (pdev_state == PDRV_ONLINE		||
+					pdev_state == PDRV_FAILED	||
+					pdev_state == PDRV_RBLD		||
+					pdev_state == PDRV_HOTSPARE	||
+					megaraid_expose_unconf_disks == 0) {
+
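+					/* fail the inquiry so this drive
+					 * is not exposed to the kernel */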
+					status = 0xF0;
+				}
+			}
+		}
+
+		// Convert MegaRAID status to Linux error code
+		switch (status) {
+
+		case 0x00:
+
+			scp->result = (DID_OK << 16);
+			break;
+
+		case 0x02:
+
+			/* set sense_buffer and result fields */
+			if (mbox->cmd == MBOXCMD_PASSTHRU ||
+				mbox->cmd == MBOXCMD_PASSTHRU64) {
+
+				memcpy(scp->sense_buffer, pthru->reqsensearea,
+						14);
+
+				scp->result = DRIVER_SENSE << 24 |
+					DID_OK << 16 | CHECK_CONDITION << 1;
+			}
+			else {
+				if (mbox->cmd == MBOXCMD_EXTPTHRU) {
+
+					memcpy(scp->sense_buffer,
+						epthru->reqsensearea, 14);
+
+					scp->result = DRIVER_SENSE << 24 |
+						DID_OK << 16 |
+						CHECK_CONDITION << 1;
+				} else {
+					scp->sense_buffer[0] = 0x70;
+					scp->sense_buffer[2] = ABORTED_COMMAND;
+					scp->result = CHECK_CONDITION << 1;
+				}
+			}
+			break;
+
+		case 0x08:
+
+			scp->result = DID_BUS_BUSY << 16 | status;
+			break;
+
+		default:
+
+			/*
+			 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
+			 * failed
+			 */
+			if (scp->cmnd[0] == TEST_UNIT_READY) {
+				scp->result = DID_ERROR << 16 |
+					RESERVATION_CONFLICT << 1;
+			}
+			else
+			/*
+			 * Error code returned is 1 if Reserve or Release
+			 * failed or the input parameter is invalid
+			 */
+			if (status == 1 && (scp->cmnd[0] == RESERVE ||
+					 scp->cmnd[0] == RELEASE)) {
+
+				scp->result = DID_ERROR << 16 |
+					RESERVATION_CONFLICT << 1;
+			}
+			else {
+				scp->result = DID_BAD_TARGET << 16 | status;
+			}
+		}
+
+		// print a debug message for all failed commands
+		if (status) {
+			megaraid_mbox_display_scb(adapter, scb);
+		}
+
+		// Free our internal resources and call the mid-layer callback
+		// routine
+		megaraid_mbox_sync_scb(adapter, scb);
+
+		// remove from local clist
+		list_del_init(&scb->list);
+
+		// put back in free list
+		megaraid_dealloc_scb(adapter, scb);
+
+		// send the scsi packet back to kernel
+		scp->scsi_done(scp);
+	}
+
+	return;
+}
+
+
+/**
+ * megaraid_abort_handler - abort the scsi command
+ * @scp		: command to be aborted
+ *
+ * Abort a previous SCSI request. Only commands on the pending list can be
+ * aborted. All the commands issued to the F/W must complete.
 */
+static int
+megaraid_abort_handler(struct scsi_cmnd *scp)
+{
+	adapter_t		*adapter;
+	mraid_device_t		*raid_dev;
+	scb_t			*scb;
+	scb_t			*tmp;
+	int			found;
+	unsigned long		flags;
+	int			i;
+
+
+	adapter		= SCP2ADAPTER(scp);
+	raid_dev	= ADAP2RAIDDEV(adapter);
+
+	con_log(CL_ANN, (KERN_WARNING
+		"megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
+		scp->cmnd[0], SCP2CHANNEL(scp),
+		SCP2TARGET(scp), SCP2LUN(scp)));
+
+	// If FW has stopped responding, simply return failure
+	if (raid_dev->hw_error) {
+		con_log(CL_ANN, (KERN_NOTICE
+			"megaraid: hw error, not aborting\n"));
+		return FAILED;
+	}
+
+	// There might be a race here, where the command was completed by the
+	// firmware and is now on the completed list, but the abort arrived
+	// before we could complete the command to the kernel in the dpc.
+	// Find out if this is the case to avoid the race.
+	scb = NULL;
+	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
+	list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
+
+		if (scb->scp == scp) {	// Found command
+
+			list_del_init(&scb->list);	// from completed list
+
+			con_log(CL_ANN, (KERN_WARNING
+			"megaraid: %d[%d:%d], abort from completed list\n",
+				scb->sno, scb->dev_channel, scb->dev_target));
+
+			scp->result = (DID_ABORT << 16);
+			scp->scsi_done(scp);
+
+			megaraid_dealloc_scb(adapter, scb);
+
+			spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
+				flags);
+
+			return SUCCESS;
+		}
+	}
+	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
+
+
+	// Find out if this command is still on the pending list. If it is and
+	// was never issued, abort and return success. If the command is owned
+	// by the firmware, we must wait for the FW to complete it.
+	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+
+		if (scb->scp == scp) {	// Found command
+
+			list_del_init(&scb->list);	// from pending list
+
+			ASSERT(!(scb->state & SCB_ISSUED));
+
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid abort: [%d:%d], driver owner\n",
+				scb->dev_channel, scb->dev_target));
+
+			scp->result = (DID_ABORT << 16);
+			scp->scsi_done(scp);
+
+			megaraid_dealloc_scb(adapter, scb);
+
+			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
+				flags);
+
+			return SUCCESS;
+		}
+	}
+	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+
+	// Check whether we even own this command; if we do, it must be owned
+	// by the firmware. The only way to locate the FW scb is to traverse
+	// the list of all SCBs, since the driver does not maintain these SCBs
+	// on any list
+	found = 0;
+	spin_lock_irq(&adapter->lock);
+	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
+		scb = adapter->kscb_list + i;
+
+		if (scb->scp == scp) {
+
+			found = 1;
+
+			if (!(scb->state & SCB_ISSUED)) {
+				con_log(CL_ANN, (KERN_WARNING
+				"megaraid abort: %d[%d:%d], invalid state\n",
+				scb->sno, scb->dev_channel, scb->dev_target));
+				BUG();
+			}
+			else {
+				con_log(CL_ANN, (KERN_WARNING
+				"megaraid abort: %d[%d:%d], fw owner\n",
+				scb->sno, scb->dev_channel, scb->dev_target));
+			}
+		}
+	}
+	spin_unlock_irq(&adapter->lock);
+
+	if (!found) {
+		con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n"));
+
+		// FIXME: Should there be a callback for this command?
+		return SUCCESS;
+	}
+
+	// We cannot actually abort a command owned by firmware, return
+	// failure and wait for reset. In host reset handler, we will find out
+	// if the HBA is still live
+	return FAILED;
+}
+
+/**
+ * megaraid_reset_handler - device reset handler for mailbox based driver
+ * @scp		: reference command
+ *
+ * Reset handler for the mailbox based controller. First try to find out if
 * the FW is still live, in which case the outstanding commands counter must go
+ * down to 0. If that happens, also issue the reservation reset command to
+ * relinquish (possible) reservations on the logical drives connected to this
+ * host.
 */
+static int
+megaraid_reset_handler(struct scsi_cmnd *scp)
+{
+	adapter_t	*adapter;
+	scb_t		*scb;
+	scb_t		*tmp;
+	mraid_device_t	*raid_dev;
+	unsigned long	flags;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+	int		rval;
+	int		recovery_window;
+	int		recovering;
+	int		i;
+	uioc_t		*kioc;
+
+	adapter		= SCP2ADAPTER(scp);
+	raid_dev	= ADAP2RAIDDEV(adapter);
+
+	// return failure if adapter is not responding
+	if (raid_dev->hw_error) {
+		con_log(CL_ANN, (KERN_NOTICE
+			"megaraid: hw error, cannot reset\n"));
+		return FAILED;
+	}
+
+
+	// Under exceptional conditions, FW can take up to 3 minutes to
+	// complete command processing. Wait for an additional 2 minutes for the
+	// pending commands counter to go down to 0. If it doesn't, let the
+	// controller be marked offline
+	// Also, reset all the commands currently owned by the driver
+	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
+	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
+		list_del_init(&scb->list);	// from pending list
+
+		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
+			con_log(CL_ANN, (KERN_WARNING
+			"megaraid: IOCTL packet with %d[%d:%d] being reset\n",
+			scb->sno, scb->dev_channel, scb->dev_target));
+
+			scb->status = -1;
+
+			kioc			= (uioc_t *)scb->gp;
+			kioc->status		= -EFAULT;
+
+			megaraid_mbox_mm_done(adapter, scb);
+		} else {
+			if (scb->scp == scp) {	// Found command
+				con_log(CL_ANN, (KERN_WARNING
+					"megaraid: %d[%d:%d], reset from pending list\n",
+					scb->sno, scb->dev_channel, scb->dev_target));
+			} else {
+				con_log(CL_ANN, (KERN_WARNING
+				"megaraid: IO packet with %d[%d:%d] being reset\n",
+				scb->sno, scb->dev_channel, scb->dev_target));
+			}
+
+			scb->scp->result = (DID_RESET << 16);
+			scb->scp->scsi_done(scb->scp);
+
+			megaraid_dealloc_scb(adapter, scb);
+		}
+	}
+	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
+
+	if (adapter->outstanding_cmds) {
+		con_log(CL_ANN, (KERN_NOTICE
+			"megaraid: %d outstanding commands. Max wait %d sec\n",
+			adapter->outstanding_cmds,
+			(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
+	}
+
+	recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
+
+	recovering = adapter->outstanding_cmds;
+
+	for (i = 0; i < recovery_window; i++) {
+
+		megaraid_ack_sequence(adapter);
+
+		// print a message once every 5 seconds only
+		if (!(i % 5)) {
+			con_log(CL_ANN, (
+			"megaraid mbox: Wait for %d commands to complete:%d\n",
+				adapter->outstanding_cmds,
+				(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
+		}
+
+		// bailout if no recovery happened in reset time
+		if (adapter->outstanding_cmds == 0) {
+			break;
+		}
+
+		msleep(1000);
+	}
+
+	spin_lock(&adapter->lock);
+
+	// If still outstanding commands, bail out
+	if (adapter->outstanding_cmds) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mbox: critical hardware error!\n"));
+
+		raid_dev->hw_error = 1;
+
+		rval = FAILED;
+		goto out;
+	}
+	else {
+		con_log(CL_ANN, (KERN_NOTICE
+		"megaraid mbox: reset sequence completed successfully\n"));
+	}
+
+
+	// If the controller supports clustering, reset reservations
+	if (!adapter->ha) {
+		rval = SUCCESS;
+		goto out;
+	}
+
+	// clear reservations if any
+	raw_mbox[0] = CLUSTER_CMD;
+	raw_mbox[2] = RESET_RESERVATIONS;
+
+	rval = SUCCESS;
+	if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
+		con_log(CL_ANN,
+			(KERN_INFO "megaraid: reservation reset\n"));
+	}
+	else {
+		rval = FAILED;
+		con_log(CL_ANN, (KERN_WARNING
+				"megaraid: reservation reset failed\n"));
+	}
+
+ out:
+	spin_unlock(&adapter->lock);
+	return rval;
+}
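+
+/*
+ * Recovery timeline of the handler above, worked out from the constants it
+ * uses: the pending list is flushed immediately, then megaraid_ack_sequence()
+ * is polled once per second for up to MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT
+ * (180 + 120 = 300) seconds before the controller is declared dead.
+ */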
+
+/*
+ * START: internal commands library
+ *
+ * This section of the driver has the common routine used by the driver and
+ * also has all the FW routines
+ */
+
+/**
+ * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
+ * @adapter	: controller's soft state
+ * @raw_mbox	: the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers.
+ */
+static int
+mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox64_t	*mbox64;
+	mbox_t		*mbox;
+	uint8_t		status;
+	int		i;
+
+
+	mbox64	= raid_dev->mbox64;
+	mbox	= raid_dev->mbox;
+
+	/*
+	 * Wait until mailbox is free
+	 */
+	if (megaraid_busywait_mbox(raid_dev) != 0)
+		goto blocked_mailbox;
+
+	/*
+	 * Copy mailbox data into host structure
+	 */
+	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+	mbox->cmdid		= 0xFE;
+	mbox->busy		= 1;
+	mbox->poll		= 0;
+	mbox->ack		= 0;
+	mbox->numstatus		= 0xFF;
+	mbox->status		= 0xFF;
+
+	wmb();
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	// wait for maximum 1 second for status to post. If the status is not
+	// available within 1 second, assume FW is initializing and wait
+	// for an extended amount of time
+	if (mbox->numstatus == 0xFF) {	// status not yet available
+		udelay(25);
+
+		for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
+			rmb();
+			msleep(1);
+		}
+
+
+		if (i == 1000) {
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid mailbox: wait for FW to boot      "));
+
+			for (i = 0; (mbox->numstatus == 0xFF) &&
+					(i < MBOX_RESET_WAIT); i++) {
+				rmb();
+				con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
+							MBOX_RESET_WAIT - i));
+				msleep(1000);
+			}
+
+			if (i == MBOX_RESET_WAIT) {
+
+				con_log(CL_ANN, (
+				"\nmegaraid mailbox: status not available\n"));
+
+				return -1;
+			}
+			con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
+		}
+	}
+
+	// wait for maximum 1 second for poll semaphore
+	if (mbox->poll != 0x77) {
+		udelay(25);
+
+		for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
+			rmb();
+			msleep(1);
+		}
+
+		if (i == 1000) {
+			con_log(CL_ANN, (KERN_WARNING
+			"megaraid mailbox: could not get poll semaphore\n"));
+			return -1;
+		}
+	}
+
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+	wmb();
+
+	// wait for maximum 1 second for acknowledgement
+	if (RDINDOOR(raid_dev) & 0x2) {
+		udelay(25);
+
+		for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
+			rmb();
+			msleep(1);
+		}
+
+		if (i == 1000) {
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid mailbox: could not acknowledge\n"));
+			return -1;
+		}
+	}
+	mbox->poll	= 0;
+	mbox->ack	= 0x77;
+
+	status = mbox->status;
+
+	// invalidate the completed command id array. After command
+	// completion, firmware would write the valid id.
+	mbox->numstatus	= 0xFF;
+	mbox->status	= 0xFF;
+	for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
+		mbox->completed[i] = 0xFF;
+	}
+
+	return status;
+
+blocked_mailbox:
+
+	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
+	return -1;
+}
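+
+/*
+ * Illustrative usage sketch (not from the original source): a caller zeroes
+ * a raw mailbox, fills in the opcode and issues it synchronously, just as
+ * the internal command helpers below do:
+ *
+ *	uint8_t	raw_mbox[sizeof(mbox_t)];
+ *
+ *	memset(raw_mbox, 0, sizeof(raw_mbox));
+ *	raw_mbox[0] = FLUSH_ADAPTER;
+ *	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0)
+ *		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
+ */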
+
+
+/**
+ * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
+ * @adapter	: controller's soft state
+ * @raw_mbox	: the mailbox
+ *
+ * Issue a scb in synchronous and non-interrupt mode for mailbox based
+ * controllers. This is a faster version of the synchronous command and
+ * therefore can be called in interrupt-context as well.
+ */
+static int
+mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox_t		*mbox;
+	long		i;
+
+
+	mbox	= raid_dev->mbox;
+
+	// return immediately if the mailbox is busy
+	if (mbox->busy) return -1;
+
+	// Copy mailbox data into host structure
+	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
+	mbox->cmdid		= 0xFE;
+	mbox->busy		= 1;
+	mbox->poll		= 0;
+	mbox->ack		= 0;
+	mbox->numstatus		= 0xFF;
+	mbox->status		= 0xFF;
+
+	wmb();
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
+		if (mbox->numstatus != 0xFF) break;
+		rmb();
+		udelay(MBOX_SYNC_DELAY_200);
+	}
+
+	if (i == MBOX_SYNC_WAIT_CNT) {
+		// We may need to re-calibrate the counter
+		con_log(CL_ANN, (KERN_CRIT
+			"megaraid: fast sync command timed out\n"));
+	}
+
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
+	wmb();
+
+	return mbox->status;
+}
+
+
+/**
+ * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
+ * @raid_dev	: RAID device (HBA) soft state
+ *
+ * Wait until the controller's mailbox is available to accept more commands.
+ * Wait for at most 1 second.
+ */
+static int
+megaraid_busywait_mbox(mraid_device_t *raid_dev)
+{
+	mbox_t	*mbox = raid_dev->mbox;
+	int	i = 0;
+
+	if (mbox->busy) {
+		udelay(25);
+		for (i = 0; mbox->busy && i < 1000; i++)
+			msleep(1);
+	}
+
+	if (i < 1000) return 0;
+	else return -1;
+}
+
+
+/**
+ * megaraid_mbox_product_info - some static information about the controller
+ * @adapter	: our soft state
+ *
+ * Issue commands to the controller to grab some parameters required by our
+ * caller.
+ */
+static int
+megaraid_mbox_product_info(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox_t			*mbox;
+	uint8_t			raw_mbox[sizeof(mbox_t)];
+	mraid_pinfo_t		*pinfo;
+	dma_addr_t		pinfo_dma_h;
+	mraid_inquiry3_t	*mraid_inq3;
+	int			i;
+
+
+	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+	mbox = (mbox_t *)raw_mbox;
+
+	/*
+	 * Issue an ENQUIRY3 command to find out certain adapter parameters,
+	 * e.g., max channels, max commands etc.
+	 */
+	pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+				      &pinfo_dma_h);
+
+	if (pinfo == NULL) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+
+		return -1;
+	}
+
+	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+	raw_mbox[0] = FC_NEW_CONFIG;
+	raw_mbox[2] = NC_SUBOP_ENQUIRY3;
+	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
+
+	// Issue the command
+	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
+
+		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+			pinfo, pinfo_dma_h);
+
+		return -1;
+	}
+
+	/*
+	 * Collect information about the state of each physical drive
+	 * attached to the controller. We will expose all the disks
+	 * that are not part of a RAID
+	 */
+	mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
+	for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
+		raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
+	}
+
+	/*
+	 * Get product info for information like number of channels,
+	 * maximum commands supported.
+	 */
+	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+	mbox->xferaddr = (uint32_t)pinfo_dma_h;
+
+	raw_mbox[0] = FC_NEW_CONFIG;
+	raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
+
+	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: product info failed\n"));
+
+		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+			pinfo, pinfo_dma_h);
+
+		return -1;
+	}
+
+	/*
+	 * Set up some parameters for the host, as required by our caller
+	 */
+	adapter->max_channel = pinfo->nchannels;
+
+	/*
+	 * We will export all the logical drives on a single channel.
+	 * Add 1 since inquiries do not come for the initiator ID
+	 */
+	adapter->max_target	= MAX_LOGICAL_DRIVES_40LD + 1;
+	adapter->max_lun	= 8;	// up to 8 LUNs for non-disk devices
+
+	/*
+	 * These are the maximum outstanding commands for the scsi-layer
+	 */
+	adapter->max_cmds	= MBOX_MAX_SCSI_CMDS;
+
+	memset(adapter->fw_version, 0, VERSION_SIZE);
+	memset(adapter->bios_version, 0, VERSION_SIZE);
+
+	memcpy(adapter->fw_version, pinfo->fw_version, 4);
+	adapter->fw_version[4] = 0;
+
+	memcpy(adapter->bios_version, pinfo->bios_version, 4);
+	adapter->bios_version[4] = 0;
+
+	con_log(CL_ANN, (KERN_NOTICE
+		"megaraid: fw version:[%s] bios version:[%s]\n",
+		adapter->fw_version, adapter->bios_version));
+
+	pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+			pinfo_dma_h);
+
+	return 0;
+}
+
+
+
+/**
+ * megaraid_mbox_extended_cdb - check for support for extended CDBs
+ * @adapter	: soft state for the controller
+ *
+ * This routine checks whether the controller in question supports extended
+ * (> 10 bytes) CDBs.
+ */
+static int
+megaraid_mbox_extended_cdb(adapter_t *adapter)
+{
+	mbox_t		*mbox;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+	int		rval;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+	mbox->xferaddr	= (uint32_t)adapter->ibuf_dma_h;
+
+	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+	raw_mbox[0] = MAIN_MISC_OPCODE;
+	raw_mbox[2] = SUPPORT_EXT_CDB;
+
+	/*
+	 * Issue the command
+	 */
+	rval = 0;
+	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+		rval = -1;
+	}
+
+	return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_ha - Do we support clustering
+ * @adapter	: soft state for the controller
+ * @init_id	: ID of the initiator
+ *
+ * Determine if the firmware supports clustering and the ID of the initiator.
+ */
+static int
+megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
+{
+	mbox_t		*mbox;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+	int		rval;
+
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+
+	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+	raw_mbox[0] = GET_TARGET_ID;
+
+	// Issue the command
+	*init_id = 7;
+	rval =  -1;
+	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+		*init_id = *(uint8_t *)adapter->ibuf;
+
+		con_log(CL_ANN, (KERN_INFO
+			"megaraid: cluster firmware, initiator ID: %d\n",
+			*init_id));
+
+		rval =  0;
+	}
+
+	return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_random_del - Do we support random deletion
+ * @adapter	: soft state for the controller
+ *
+ * Determine if the firmware supports random deletion.
+ * Return:	1 if the operation is supported, 0 otherwise
+ */
+static int
+megaraid_mbox_support_random_del(adapter_t *adapter)
+{
+	mbox_t		*mbox;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+	int		rval;
+
+	/*
+	 * Newer firmware on Dell CERC expects a different
+	 * random deletion handling, so disable it.
+	 */
+	if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
+	    adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
+	    adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+	    adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
+	    (adapter->fw_version[0] > '6' ||
+	     (adapter->fw_version[0] == '6' &&
+	      adapter->fw_version[2] > '6') ||
+	     (adapter->fw_version[0] == '6'
+	      && adapter->fw_version[2] == '6'
+	      && adapter->fw_version[3] > '1'))) {
+		con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
+		return 0;
+	}
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+	raw_mbox[0] = FC_DEL_LOGDRV;
+	raw_mbox[2] = OP_SUP_DEL_LOGDRV;
+
+	// Issue the command
+	rval = 0;
+	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+
+		con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
+
+		rval =  1;
+	}
+
+	return rval;
+}
+
+
+/**
+ * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
+ * @adapter	: soft state for the controller
+ *
+ * Find out the maximum number of scatter-gather elements supported by the
+ * firmware.
+ */
+static int
+megaraid_mbox_get_max_sg(adapter_t *adapter)
+{
+	mbox_t		*mbox;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+	int		nsg;
+
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+	raw_mbox[0] = MAIN_MISC_OPCODE;
+	raw_mbox[2] = GET_MAX_SG_SUPPORT;
+
+	// Issue the command
+	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+		nsg =  *(uint8_t *)adapter->ibuf;
+	}
+	else {
+		nsg =  MBOX_DEFAULT_SG_SIZE;
+	}
+
+	if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
+
+	return nsg;
+}
+
+
+/**
+ * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
+ * @adapter	: soft state for the controller
+ *
+ * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
+ * can be exported as regular SCSI channels.
+ */
+static void
+megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox_t		*mbox;
+	uint8_t		raw_mbox[sizeof(mbox_t)];
+
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+	raw_mbox[0] = CHNL_CLASS;
+	raw_mbox[2] = GET_CHNL_CLASS;
+
+	// Issue the command. If the command fails, all channels are RAID
+	// channels
+	raid_dev->channel_class = 0xFF;
+	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
+		raid_dev->channel_class =  *(uint8_t *)adapter->ibuf;
+	}
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_flush_cache - flush adapter and disks cache
+ * @adapter		: soft state for the controller
+ *
+ * Flush adapter cache followed by disks cache.
+ */
+static void
+megaraid_mbox_flush_cache(adapter_t *adapter)
+{
+	mbox_t	*mbox;
+	uint8_t	raw_mbox[sizeof(mbox_t)];
+
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+	raw_mbox[0] = FLUSH_ADAPTER;
+
+	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
+	}
+
+	raw_mbox[0] = FLUSH_SYSTEM;
+
+	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+		con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
+	}
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_fire_sync_cmd - fire the sync cmd
+ * @adapter		: soft state for the controller
+ *
+ * Clears the pending cmds in FW and reinits its RAID structs.
+ */
+static int
+megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
+{
+	mbox_t	*mbox;
+	uint8_t	raw_mbox[sizeof(mbox_t)];
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mbox64_t *mbox64;
+	int	status = 0;
+	int i;
+	uint32_t dword;
+
+	mbox = (mbox_t *)raw_mbox;
+
+	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
+
+	raw_mbox[0] = 0xFF;
+
+	mbox64	= raid_dev->mbox64;
+	mbox	= raid_dev->mbox;
+
+	/* Wait until mailbox is free */
+	if (megaraid_busywait_mbox(raid_dev) != 0) {
+		status = 1;
+		goto blocked_mailbox;
+	}
+
+	/* Copy mailbox data into host structure */
+	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
+	mbox->cmdid		= 0xFE;
+	mbox->busy		= 1;
+	mbox->poll		= 0;
+	mbox->ack		= 0;
+	mbox->numstatus		= 0;
+	mbox->status		= 0;
+
+	wmb();
+	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
+
+	/*
+	 * Wait at most 1 min for status to post. If the firmware supports
+	 * the above command, mbox->cmd will be set to 0; else the firmware
+	 * will reject the command with mbox->numstatus set to 1.
+	 */
+
+	i = 0;
+	status = 0;
+	while (!mbox->numstatus && mbox->cmd == 0xFF) {
+		rmb();
+		msleep(1);
+		i++;
+		if (i > 1000 * 60) {
+			status = 1;
+			break;
+		}
+	}
+	if (mbox->numstatus == 1)
+		status = 1; /*cmd not supported*/
+
+	/* Check for interrupt line */
+	dword = RDOUTDOOR(raid_dev);
+	WROUTDOOR(raid_dev, dword);
+	WRINDOOR(raid_dev,2);
+
+	return status;
+
+blocked_mailbox:
+	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
+	return status;
+}
+
+/**
+ * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
+ * @adapter		: controller's soft state
+ * @scb			: SCB to be displayed
+ *
+ * Display information about the given SCB iff the current debug level is
+ * verbose.
+ */
+static void
+megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
+{
+	mbox_ccb_t		*ccb;
+	struct scsi_cmnd	*scp;
+	mbox_t			*mbox;
+	int			level;
+	int			i;
+
+
+	ccb	= (mbox_ccb_t *)scb->ccb;
+	scp	= scb->scp;
+	mbox	= ccb->mbox;
+
+	level = CL_DLEVEL3;
+
+	con_log(level, (KERN_NOTICE
+		"megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
+		mbox->cmd, scb->sno));
+
+	con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
+		mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
+		mbox->numsge));
+
+	if (!scp) return;
+
+	con_log(level, (KERN_NOTICE "scsi cmnd: "));
+
+	for (i = 0; i < scp->cmd_len; i++) {
+		con_log(level, ("%#2.02x ", scp->cmnd[i]));
+	}
+
+	con_log(level, ("\n"));
+
+	return;
+}
+
+
+/**
+ * megaraid_mbox_setup_device_map - manage device ids
+ * @adapter	: Driver's soft state
+ *
+ * Manage the device ids to have an appropriate mapping between the kernel
+ * scsi addresses and megaraid scsi and logical drive addresses. We export
+ * scsi devices on their actual addresses, whereas the logical drives are
+ * exported on a virtual scsi channel.
+ */
+static void
+megaraid_mbox_setup_device_map(adapter_t *adapter)
+{
+	uint8_t		c;
+	uint8_t		t;
+
+	/*
+	 * First fill the values on the logical drive channel
+	 */
+	for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+		adapter->device_ids[adapter->max_channel][t] =
+			(t < adapter->init_id) ?  t : t - 1;
+
+	adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
+
+	/*
+	 * Fill the values on the physical devices channels
+	 */
+	for (c = 0; c < adapter->max_channel; c++)
+		for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
+			adapter->device_ids[c][t] = (c << 8) | t;
+}
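+
+/*
+ * Illustrative example (values assumed, not from the original source): with
+ * max_channel = 2 and init_id = 7, the map built above would contain:
+ *
+ *	device_ids[0][5] = 0x0005	// physical: channel 0, target 5
+ *	device_ids[1][5] = 0x0105	// physical: channel 1, target 5
+ *	device_ids[2][5] = 5		// virtual channel: LD 5 (5 < init_id)
+ *	device_ids[2][9] = 8		// virtual channel: LD 8 (9 > init_id)
+ *	device_ids[2][7] = 0xFF		// the initiator ID maps to no device
+ */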
+
+
+/*
+ * END: internal commands library
+ */
+
+/*
+ * START: Interface for the common management module
+ *
+ * This section interfaces with the common management module to provide
+ * ioctl and sysfs support
+ */
+
+/**
+ * megaraid_cmm_register - register with the management module
+ * @adapter		: HBA soft state
+ *
+ * Register with the management module, which allows applications to issue
+ * ioctl calls to the drivers. This interface is used by the management module
+ * to set up sysfs support as well.
+ */
+static int
+megaraid_cmm_register(adapter_t *adapter)
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	mraid_mmadp_t	adp;
+	scb_t		*scb;
+	mbox_ccb_t	*ccb;
+	int		rval;
+	int		i;
+
+	// Allocate memory for the base list of scb for management module.
+	adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
+
+	if (adapter->uscb_list == NULL) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+		return -1;
+	}
+
+
+	// Initialize the synchronization parameters for resources for
+	// commands for management module
+	INIT_LIST_HEAD(&adapter->uscb_pool);
+
+	spin_lock_init(USER_FREE_LIST_LOCK(adapter));
+
+
+
+	// Link all the packets. Note: for commands coming from the common
+	// management module, the mailbox physical addresses are already set
+	// up by it. We just need placeholders for them in our local command
+	// control blocks
+	for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
+
+		scb			= adapter->uscb_list + i;
+		ccb			= raid_dev->uccb_list + i;
+
+		scb->ccb		= (caddr_t)ccb;
+		ccb->mbox64		= raid_dev->umbox64 + i;
+		ccb->mbox		= &ccb->mbox64->mbox32;
+		ccb->raw_mbox		= (uint8_t *)ccb->mbox;
+
+		scb->gp			= 0;
+
+		// command IDs 0 - (MBOX_MAX_SCSI_CMDS-1) are reserved for
+		// commands coming from the IO subsystem (mid-layer)
+		scb->sno		= i + MBOX_MAX_SCSI_CMDS;
+
+		scb->scp		= NULL;
+		scb->state		= SCB_FREE;
+		scb->dma_direction	= PCI_DMA_NONE;
+		scb->dma_type		= MRAID_DMA_NONE;
+		scb->dev_channel	= -1;
+		scb->dev_target		= -1;
+
+		// put scb in the free pool
+		list_add_tail(&scb->list, &adapter->uscb_pool);
+	}
+
+	adp.unique_id		= adapter->unique_id;
+	adp.drvr_type		= DRVRTYPE_MBOX;
+	adp.drvr_data		= (unsigned long)adapter;
+	adp.pdev		= adapter->pdev;
+	adp.issue_uioc		= megaraid_mbox_mm_handler;
+	adp.timeout		= MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
+	adp.max_kioc		= MBOX_MAX_USER_CMDS;
+
+	if ((rval = mraid_mm_register_adp(&adp)) != 0) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mbox: did not register with CMM\n"));
+
+		kfree(adapter->uscb_list);
+	}
+
+	return rval;
+}
+
+
+/**
+ * megaraid_cmm_unregister - un-register with the management module
+ * @adapter		: HBA soft state
+ *
+ * Un-register with the management module.
+ * FIXME: mgmt module must return failure for unregister if it has pending
+ * commands in LLD.
+ */
+static int
+megaraid_cmm_unregister(adapter_t *adapter)
+{
+	kfree(adapter->uscb_list);
+	mraid_mm_unregister_adp(adapter->unique_id);
+	return 0;
+}
+
+
+/**
+ * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
+ * @drvr_data		: LLD specific data
+ * @kioc		: CMM interface packet
+ * @action		: command action
+ *
+ * This routine is invoked whenever the Common Management Module (CMM) has a
+ * command for us. The 'action' parameter specifies if this is a new command
+ * or otherwise.
+ */
+static int
+megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
+{
+	adapter_t *adapter;
+
+	if (action != IOCTL_ISSUE) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: unsupported management action:%#2x\n",
+			action));
+		return (-ENOTSUPP);
+	}
+
+	adapter = (adapter_t *)drvr_data;
+
+	// make sure this adapter is not being detached right now.
+	if (atomic_read(&adapter->being_detached)) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: reject management request, detaching\n"));
+		return (-ENODEV);
+	}
+
+	switch (kioc->opcode) {
+
+	case GET_ADAP_INFO:
+
+		kioc->status =  gather_hbainfo(adapter, (mraid_hba_info_t *)
+					(unsigned long)kioc->buf_vaddr);
+
+		kioc->done(kioc);
+
+		return kioc->status;
+
+	case MBOX_CMD:
+
+		return megaraid_mbox_mm_command(adapter, kioc);
+
+	default:
+		kioc->status = (-EINVAL);
+		kioc->done(kioc);
+		return (-EINVAL);
+	}
+
+	return 0;	// not reached
+}
+
+/**
+ * megaraid_mbox_mm_command - issues commands routed through CMM
+ * @adapter		: HBA soft state
+ * @kioc		: management command packet
+ *
+ * Issues commands that are routed through the management module.
+ */
+static int
+megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
+{
+	struct list_head	*head = &adapter->uscb_pool;
+	mbox64_t		*mbox64;
+	uint8_t			*raw_mbox;
+	scb_t			*scb;
+	mbox_ccb_t		*ccb;
+	unsigned long		flags;
+
+	// detach one scb from free pool
+	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+	if (list_empty(head)) {	// should never happen because of CMM
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid mbox: bug in cmm handler, lost resources\n"));
+
+		spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+		return (-EINVAL);
+	}
+
+	scb = list_entry(head->next, scb_t, list);
+	list_del_init(&scb->list);
+
+	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+	scb->state		= SCB_ACTIVE;
+	scb->dma_type		= MRAID_DMA_NONE;
+	scb->dma_direction	= PCI_DMA_NONE;
+
+	ccb		= (mbox_ccb_t *)scb->ccb;
+	mbox64		= (mbox64_t *)(unsigned long)kioc->cmdbuf;
+	raw_mbox	= (uint8_t *)&mbox64->mbox32;
+
+	memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
+
+	scb->gp		= (unsigned long)kioc;
+
+	/*
+	 * If it is a logdrv random delete operation, we have to wait till
+	 * there are no outstanding cmds at the fw and then issue it directly
+	 */
+	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+		if (wait_till_fw_empty(adapter)) {
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid mbox: LD delete, timed out\n"));
+
+			kioc->status = -ETIME;
+
+			scb->status = -1;
+
+			megaraid_mbox_mm_done(adapter, scb);
+
+			return (-ETIME);
+		}
+
+		INIT_LIST_HEAD(&scb->list);
+
+		scb->state = SCB_ISSUED;
+		if (mbox_post_cmd(adapter, scb) != 0) {
+
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid mbox: LD delete, mailbox busy\n"));
+
+			kioc->status = -EBUSY;
+
+			scb->status = -1;
+
+			megaraid_mbox_mm_done(adapter, scb);
+
+			return (-EBUSY);
+		}
+
+		return 0;
+	}
+
+	// put the command on the pending list and execute
+	megaraid_mbox_runpendq(adapter, scb);
+
+	return 0;
+}
+
+
+static int
+wait_till_fw_empty(adapter_t *adapter)
+{
+	unsigned long	flags = 0;
+	int		i;
+
+
+	/*
+	 * Set the quiescent flag to stop issuing cmds to FW.
+	 */
+	spin_lock_irqsave(&adapter->lock, flags);
+	adapter->quiescent++;
+	spin_unlock_irqrestore(&adapter->lock, flags);
+
+	/*
+	 * Wait till there are no more cmds outstanding at FW. Try for at most
+	 * 60 seconds
+	 */
+	for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
+		con_log(CL_DLEVEL1, (KERN_INFO
+			"megaraid: FW has %d pending commands\n",
+			adapter->outstanding_cmds));
+
+		msleep(1000);
+	}
+
+	return adapter->outstanding_cmds;
+}
+
+
+/**
+ * megaraid_mbox_mm_done - callback for CMM commands
+ * @adapter	: HBA soft state
+ * @scb		: completed command
+ *
+ * Callback routine for internal commands originated from the management
+ * module.
+ */
+static void
+megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
+{
+	uioc_t			*kioc;
+	mbox64_t		*mbox64;
+	uint8_t			*raw_mbox;
+	unsigned long		flags;
+
+	kioc			= (uioc_t *)scb->gp;
+	mbox64			= (mbox64_t *)(unsigned long)kioc->cmdbuf;
+	mbox64->mbox32.status	= scb->status;
+	raw_mbox		= (uint8_t *)&mbox64->mbox32;
+
+
+	// put scb in the free pool
+	scb->state	= SCB_FREE;
+	scb->scp	= NULL;
+
+	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
+
+	list_add(&scb->list, &adapter->uscb_pool);
+
+	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
+
+	// if a delete logical drive operation succeeded, restart the
+	// controller
+	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
+
+		adapter->quiescent--;
+
+		megaraid_mbox_runpendq(adapter, NULL);
+	}
+
+	kioc->done(kioc);
+
+	return;
+}
+
+
+/**
+ * gather_hbainfo - HBA characteristics for the applications
+ * @adapter		: HBA soft state
+ * @hinfo		: pointer to the caller's host info structure
+ */
+static int
+gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
+{
+	uint8_t	dmajor;
+
+	dmajor			= megaraid_mbox_version[0];
+
+	hinfo->pci_vendor_id	= adapter->pdev->vendor;
+	hinfo->pci_device_id	= adapter->pdev->device;
+	hinfo->subsys_vendor_id	= adapter->pdev->subsystem_vendor;
+	hinfo->subsys_device_id	= adapter->pdev->subsystem_device;
+
+	hinfo->pci_bus		= adapter->pdev->bus->number;
+	hinfo->pci_dev_fn	= adapter->pdev->devfn;
+	hinfo->pci_slot		= PCI_SLOT(adapter->pdev->devfn);
+	hinfo->irq		= adapter->host->irq;
+	hinfo->baseport		= ADAP2RAIDDEV(adapter)->baseport;
+
+	hinfo->unique_id	= (hinfo->pci_bus << 8) | adapter->pdev->devfn;
+	hinfo->host_no		= adapter->host->host_no;
+
+	return 0;
+}
+
+/*
+ * END: Interface for the common management module
+ */
+
+
+
+/**
+ * megaraid_sysfs_alloc_resources - allocate sysfs related resources
+ * @adapter	: controller's soft state
+ *
+ * Allocate packets required to issue FW calls whenever the sysfs attributes
+ * are read. These attributes would require up-to-date information from the
+ * FW. Also set up the mutex used to serialize access to these resources,
+ * and the wait queue.
+ *
+ * Return 0 on success.
+ * Return -ERROR_CODE on failure.
+ */
+static int
+megaraid_sysfs_alloc_resources(adapter_t *adapter)
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	int		rval = 0;
+
+	raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
+
+	raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
+
+	raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
+			PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+
+	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
+		!raid_dev->sysfs_buffer) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid: out of memory, %s %d\n", __func__,
+			__LINE__));
+
+		rval = -ENOMEM;
+
+		megaraid_sysfs_free_resources(adapter);
+	}
+
+	mutex_init(&raid_dev->sysfs_mtx);
+
+	init_waitqueue_head(&raid_dev->sysfs_wait_q);
+
+	return rval;
+}
+
+
+/**
+ * megaraid_sysfs_free_resources - free sysfs related resources
+ * @adapter	: controller's soft state
+ *
+ * Free packets allocated for sysfs FW commands
+ */
+static void
+megaraid_sysfs_free_resources(adapter_t *adapter)
+{
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+
+	kfree(raid_dev->sysfs_uioc);
+	kfree(raid_dev->sysfs_mbox64);
+
+	if (raid_dev->sysfs_buffer) {
+		pci_free_consistent(adapter->pdev, PAGE_SIZE,
+			raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
+	}
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_done - callback for get ldmap
+ * @uioc	: completed packet
+ *
+ * Callback routine called in the ISR/tasklet context for get ldmap call
+ */
+static void
+megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
+{
+	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+
+	uioc->status = 0;
+
+	wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
+ * @data	: timed out packet
+ *
+ * Timeout routine to recover and return to application, in case the adapter
+ * has stopped responding. A timeout of 60 seconds for this command seems like
+ * a good value.
+ */
+static void
+megaraid_sysfs_get_ldmap_timeout(unsigned long data)
+{
+	uioc_t		*uioc = (uioc_t *)data;
+	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+
+	uioc->status = -ETIME;
+
+	wake_up(&raid_dev->sysfs_wait_q);
+}
+
+
+/**
+ * megaraid_sysfs_get_ldmap - get updated logical drive map
+ * @adapter	: controller's soft state
+ *
+ * This routine is called whenever the user reads the logical drive
+ * attributes; it fetches the current logical drive mapping table from the
+ * firmware. We use the management APIs to issue commands to the controller.
+ *
+ * NOTE: The command issuance functionality is not generalized; it is
+ * implemented only in the context of the "get ld map" command. If required,
+ * the command issuance logic can be trivially pulled out and implemented as
+ * a standalone library. For now, this should suffice since there is no other
+ * user of this interface.
+ *
+ * Return 0 on success.
+ * Return -1 on failure.
+ */
+static int
+megaraid_sysfs_get_ldmap(adapter_t *adapter)
+{
+	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
+	uioc_t			*uioc;
+	mbox64_t		*mbox64;
+	mbox_t			*mbox;
+	char			*raw_mbox;
+	struct timer_list	sysfs_timer;
+	struct timer_list	*timerp;
+	caddr_t			ldmap;
+	int			rval = 0;
+
+	/*
+	 * Allow only one read at a time to go through the sysfs attributes
+	 */
+	mutex_lock(&raid_dev->sysfs_mtx);
+
+	uioc	= raid_dev->sysfs_uioc;
+	mbox64	= raid_dev->sysfs_mbox64;
+	ldmap	= raid_dev->sysfs_buffer;
+
+	memset(uioc, 0, sizeof(uioc_t));
+	memset(mbox64, 0, sizeof(mbox64_t));
+	memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
+
+	mbox		= &mbox64->mbox32;
+	raw_mbox	= (char *)mbox;
+	uioc->cmdbuf    = (uint64_t)(unsigned long)mbox64;
+	uioc->buf_vaddr	= (caddr_t)adapter;
+	uioc->status	= -ENODATA;
+	uioc->done	= megaraid_sysfs_get_ldmap_done;
+
+	/*
+	 * Prepare the mailbox packet to get the current logical drive mapping
+	 * table
+	 */
+	mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
+
+	raw_mbox[0] = FC_DEL_LOGDRV;
+	raw_mbox[2] = OP_GET_LDID_MAP;
+
+	/*
+	 * Set up a timer to recover from a non-responding controller
+	 */
+	timerp	= &sysfs_timer;
+	init_timer(timerp);
+
+	timerp->function	= megaraid_sysfs_get_ldmap_timeout;
+	timerp->data		= (unsigned long)uioc;
+	timerp->expires		= jiffies + 60 * HZ;
+
+	add_timer(timerp);
+
+	/*
+	 * Send the command to the firmware
+	 */
+	rval = megaraid_mbox_mm_command(adapter, uioc);
+
+	if (rval == 0) {	// command successfully issued
+		wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
+
+		/*
+		 * Check if the command timed out
+		 */
+		if (uioc->status == -ETIME) {
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid: sysfs get ld map timed out\n"));
+
+			rval = -ETIME;
+		}
+		else {
+			rval = mbox->status;
+		}
+
+		if (rval == 0) {
+			memcpy(raid_dev->curr_ldmap, ldmap,
+				sizeof(raid_dev->curr_ldmap));
+		}
+		else {
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid: get ld map failed with %x\n", rval));
+		}
+	}
+	else {
+		con_log(CL_ANN, (KERN_NOTICE
+			"megaraid: could not issue ldmap command:%x\n", rval));
+	}
+
+
+	del_timer_sync(timerp);
+
+	mutex_unlock(&raid_dev->sysfs_mtx);
+
+	return rval;
+}
+
+
+/**
+ * megaraid_sysfs_show_app_hndl - display application handle for this adapter
+ * @dev		: device object representation for the host
+ * @attr	: device attribute to show
+ * @buf		: buffer to send data to
+ *
+ * Display the handle used by the applications while executing management
+ * tasks on the adapter. We invoke a management module API to get the adapter
+ * handle, since we do not interface with applications directly.
+ */
+static ssize_t
+megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(shost);
+	uint32_t	app_hndl;
+
+	app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
+
+	return snprintf(buf, 8, "%u\n", app_hndl);
+}
+
+
+/**
+ * megaraid_sysfs_show_ldnum - display the logical drive number for this device
+ * @dev		: device object representation for the scsi device
+ * @attr	: device attribute to show
+ * @buf		: buffer to send data to
+ *
+ * Display the logical drive number for the device in question, if it is a
+ * valid logical drive. For physical devices, "-1" is returned.
+ *
+ * The logical drive number is displayed in the following format:
+ *
+ * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
+ *
+ *   <int>     <int>       <int>            <int>
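+ *
+ * e.g. a hypothetical read could return "9 8 8 275": SCSI ID 9 maps to
+ * logical drive 8 with sticky ID 8, under application handle 275.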
+ */
+static ssize_t
+megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
+	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
+	int		scsi_id = -1;
+	int		logical_drv = -1;
+	int		ldid_map = -1;
+	uint32_t	app_hndl = 0;
+	int		mapped_sdev_id;
+	int		rval;
+	int		i;
+
+	if (raid_dev->random_del_supported &&
+			MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
+
+		rval = megaraid_sysfs_get_ldmap(adapter);
+		if (rval == 0) {
+
+			for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
+
+				mapped_sdev_id = sdev->id;
+
+				if (sdev->id > adapter->init_id) {
+					mapped_sdev_id -= 1;
+				}
+
+				if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
+
+					scsi_id = sdev->id;
+
+					logical_drv = i;
+
+					ldid_map = raid_dev->curr_ldmap[i];
+
+					app_hndl = mraid_mm_adapter_app_handle(
+							adapter->unique_id);
+
+					break;
+				}
+			}
+		}
+		else {
+			con_log(CL_ANN, (KERN_NOTICE
+				"megaraid: sysfs get ld map failed: %x\n",
+				rval));
+		}
+	}
+
+	return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+			ldid_map, app_hndl);
+}
+
+
+/*
+ * END: Mailbox Low Level Driver
+ */
+module_init(megaraid_init);
+module_exit(megaraid_exit);
+
+/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.h
new file mode 100644
index 0000000..c1d86d9
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mbox.h
@@ -0,0 +1,238 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: megaraid_mbox.h
+ */
+
+#ifndef _MEGARAID_H_
+#define _MEGARAID_H_
+
+
+#include "mega_common.h"
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define MEGARAID_VERSION	"2.20.5.1"
+#define MEGARAID_EXT_VERSION	"(Release Date: Thu Nov 16 15:32:35 EST 2006)"
+
+
+/*
+ * Define some PCI values here until they are put in the kernel
+ */
+#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY		0x000E
+#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY		0x0123
+
+#define PCI_DEVICE_ID_PERC4_SC				0x1960
+#define PCI_SUBSYS_ID_PERC4_SC				0x0520
+
+#define PCI_DEVICE_ID_PERC4_DC				0x1960
+#define PCI_SUBSYS_ID_PERC4_DC				0x0518
+
+#define PCI_DEVICE_ID_VERDE				0x0407
+
+#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES		0x000F
+#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES		0x014A
+
+#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND			0x0013
+#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND			0x016c
+
+#define PCI_DEVICE_ID_PERC4E_DI_KOBUK			0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK			0x016d
+
+#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE		0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE		0x016e
+
+#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION		0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION		0x016f
+
+#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE		0x0013
+#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE		0x0170
+
+#define PCI_DEVICE_ID_DOBSON				0x0408
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0		0xA520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1		0x0520
+
+#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2		0x0518
+
+#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID		0x0522
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_4		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4		0x4523
+
+#define PCI_DEVICE_ID_MEGARAID_SATA_150_6		0x1960
+#define PCI_SUBSYS_ID_MEGARAID_SATA_150_6		0x0523
+
+#define PCI_DEVICE_ID_LINDSAY				0x0409
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCS16			0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16			0x0523
+
+#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK	0x1960
+#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK	0x0520
+
+#define PCI_SUBSYS_ID_PERC3_QC				0x0471
+#define PCI_SUBSYS_ID_PERC3_DC				0x0493
+#define PCI_SUBSYS_ID_PERC3_SC				0x0475
+#define PCI_SUBSYS_ID_CERC_ATA100_4CH			0x0511
+
+
+#define MBOX_MAX_SCSI_CMDS	128	// number of cmds reserved for kernel
+#define MBOX_MAX_USER_CMDS	32	// number of cmds for applications
+#define MBOX_DEF_CMD_PER_LUN	64	// default commands per lun
+#define MBOX_DEFAULT_SG_SIZE	26	// default sg size supported by all fw
+#define MBOX_MAX_SG_SIZE	32	// maximum scatter-gather list size
+#define MBOX_MAX_SECTORS	128	// maximum sectors per IO
+#define MBOX_TIMEOUT		30	// timeout value for internal cmds
+#define MBOX_BUSY_WAIT		10	// max usec to wait for busy mailbox
+#define MBOX_RESET_WAIT		180	// wait these many seconds in reset
+#define MBOX_RESET_EXT_WAIT	120	// extended wait reset
+#define MBOX_SYNC_WAIT_CNT	0xFFFF	// wait loop index for synchronous mode
+
+#define MBOX_SYNC_DELAY_200	200	// 200 micro-seconds
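+
+/*
+ * Worked out for reference: the fast sync path polls up to
+ * MBOX_SYNC_WAIT_CNT (0xFFFF) times with a MBOX_SYNC_DELAY_200 (200us)
+ * delay between polls, i.e. roughly 65535 * 200us ~= 13 seconds of
+ * busy-waiting before it gives up.
+ */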
+
+/*
+ * Maximum transfer that can happen through the firmware commands issued
+ * internally by the driver.
+ */
+#define MBOX_IBUF_SIZE		4096
+
+
+/**
+ * mbox_ccb_t - command control block specific to mailbox based controllers
+ * @raw_mbox		: raw mailbox pointer
+ * @mbox		: mailbox
+ * @mbox64		: extended mailbox
+ * @mbox_dma_h		: mailbox dma address
+ * @sgl64		: 64-bit scatter-gather list
+ * @sgl32		: 32-bit scatter-gather list
+ * @sgl_dma_h		: dma handle for the scatter-gather list
+ * @pthru		: passthru structure
+ * @pthru_dma_h		: dma handle for the passthru structure
+ * @epthru		: extended passthru structure
+ * @epthru_dma_h	: dma handle for extended passthru structure
+ * @buf_dma_h		: dma handle for buffers w/o sg list
+ *
+ * command control block specific to the mailbox based controllers
+ */
+typedef struct {
+	uint8_t			*raw_mbox;
+	mbox_t			*mbox;
+	mbox64_t		*mbox64;
+	dma_addr_t		mbox_dma_h;
+	mbox_sgl64		*sgl64;
+	mbox_sgl32		*sgl32;
+	dma_addr_t		sgl_dma_h;
+	mraid_passthru_t	*pthru;
+	dma_addr_t		pthru_dma_h;
+	mraid_epassthru_t	*epthru;
+	dma_addr_t		epthru_dma_h;
+	dma_addr_t		buf_dma_h;
+} mbox_ccb_t;
+
+
+/**
+ * mraid_device_t - adapter soft state structure for mailbox controllers
+ * @una_mbox64			: 64-bit mbox - unaligned
+ * @una_mbox64_dma		: mbox dma addr - unaligned
+ * @mbox			: 32-bit mbox - aligned
+ * @mbox64			: 64-bit mbox - aligned
+ * @mbox_dma			: mbox dma addr - aligned
+ * @mailbox_lock		: exclusion lock for the mailbox
+ * @baseport			: base port of hba memory
+ * @baseaddr			: mapped addr of hba memory
+ * @mbox_pool			: pool of mailboxes
+ * @mbox_pool_handle		: handle for the mailbox pool memory
+ * @epthru_pool			: a pool for extended passthru commands
+ * @epthru_pool_handle		: handle to the pool above
+ * @sg_pool			: pool of scatter-gather lists for this driver
+ * @sg_pool_handle		: handle to the pool above
+ * @ccb_list			: list of our command control blocks
+ * @uccb_list			: list of cmd control blocks for mgmt module
+ * @umbox64			: array of mailbox for user commands (cmm)
+ * @pdrv_state			: array for state of each physical drive.
+ * @last_disp			: flag used to show device scanning
+ * @hw_error			: set if FW not responding
+ * @fast_load			: If set, skip physical device scanning
+ * @channel_class		: channel class, RAID or SCSI
+ * @sysfs_mtx			: mutex to serialize access to sysfs res.
+ * @sysfs_uioc			: management packet to issue FW calls from sysfs
+ * @sysfs_mbox64		: mailbox packet to issue FW calls from sysfs
+ * @sysfs_buffer		: data buffer for FW commands issued from sysfs
+ * @sysfs_buffer_dma		: DMA buffer for FW commands issued from sysfs
+ * @sysfs_wait_q		: wait queue for sysfs operations
+ * @random_del_supported	: set if the random deletion is supported
+ * @curr_ldmap			: current LDID map
+ *
+ * Initialization structure for mailbox controllers: memory based and IO based
+ * All the fields in this structure are LLD specific and may be discovered at
+ * init() or start() time.
+ *
+ * NOTE: The fields of this structure are placed to minimize cache misses
+ */
+#define MAX_LD_EXTENDED64	64
+typedef struct {
+	mbox64_t			*una_mbox64;
+	dma_addr_t			una_mbox64_dma;
+	mbox_t				*mbox;
+	mbox64_t			*mbox64;
+	dma_addr_t			mbox_dma;
+	spinlock_t			mailbox_lock;
+	unsigned long			baseport;
+	void __iomem *			baseaddr;
+	struct mraid_pci_blk		mbox_pool[MBOX_MAX_SCSI_CMDS];
+	struct dma_pool			*mbox_pool_handle;
+	struct mraid_pci_blk		epthru_pool[MBOX_MAX_SCSI_CMDS];
+	struct dma_pool			*epthru_pool_handle;
+	struct mraid_pci_blk		sg_pool[MBOX_MAX_SCSI_CMDS];
+	struct dma_pool			*sg_pool_handle;
+	mbox_ccb_t			ccb_list[MBOX_MAX_SCSI_CMDS];
+	mbox_ccb_t			uccb_list[MBOX_MAX_USER_CMDS];
+	mbox64_t			umbox64[MBOX_MAX_USER_CMDS];
+
+	uint8_t				pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
+	uint32_t			last_disp;
+	int				hw_error;
+	int				fast_load;
+	uint8_t				channel_class;
+	struct mutex			sysfs_mtx;
+	uioc_t				*sysfs_uioc;
+	mbox64_t			*sysfs_mbox64;
+	caddr_t				sysfs_buffer;
+	dma_addr_t			sysfs_buffer_dma;
+	wait_queue_head_t		sysfs_wait_q;
+	int				random_del_supported;
+	uint16_t			curr_ldmap[MAX_LD_EXTENDED64];
+} mraid_device_t;
+
+// route to raid device from adapter
+#define ADAP2RAIDDEV(adp)	((mraid_device_t *)((adp)->raid_device))
+
+#define MAILBOX_LOCK(rdev)	(&(rdev)->mailbox_lock)
+
+// Find out if this channel is a RAID or a SCSI channel
+#define IS_RAID_CH(rdev, ch)	(((rdev)->channel_class >> (ch)) & 0x01)
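+
+/*
+ * Example (value assumed): channel_class is a bitmask with one bit per
+ * channel, where a set bit marks a RAID channel. With channel_class = 0x02:
+ *
+ *	IS_RAID_CH(rdev, 0)	// 0 - plain SCSI channel
+ *	IS_RAID_CH(rdev, 1)	// 1 - RAID channel
+ */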
+
+
+#define RDINDOOR(rdev)		readl((rdev)->baseaddr + 0x20)
+#define RDOUTDOOR(rdev)		readl((rdev)->baseaddr + 0x2C)
+#define WRINDOOR(rdev, value)	writel(value, (rdev)->baseaddr + 0x20)
+#define WROUTDOOR(rdev, value)	writel(value, (rdev)->baseaddr + 0x2C)
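+
+/*
+ * Sketch of the doorbell handshake built on these macros (see
+ * mbox_post_sync_cmd in megaraid_mbox.c):
+ *
+ *	WRINDOOR(rdev, mbox_dma | 0x1);	// post: hand the mailbox to the FW
+ *	// ... poll mbox->numstatus until the FW writes the status ...
+ *	WRINDOOR(rdev, mbox_dma | 0x2);	// ack: status has been consumed
+ */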
+
+#endif // _MEGARAID_H_
+
+// vim: set ts=8 sw=8 tw=78:
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.c b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.c
new file mode 100644
index 0000000..65b6f6a
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.c
@@ -0,0 +1,1268 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: megaraid_mm.c
+ * Version	: v2.20.2.7 (Jul 16 2006)
+ *
+ * Common management module
+ */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "megaraid_mm.h"
+
+
+// Entry points for char node driver
+static DEFINE_MUTEX(mraid_mm_mutex);
+static int mraid_mm_open(struct inode *, struct file *);
+static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
+
+
+// routines to convert to and from the old format
+static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
+static int kioc_to_mimd(uioc_t *, mimd_t __user *);
+
+
+// Helper functions
+static int handle_drvrcmd(void __user *, uint8_t, int *);
+static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
+static void ioctl_done(uioc_t *);
+static void lld_timedout(unsigned long);
+static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
+static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
+static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
+static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
+static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
+static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
+static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
+static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
+
+#ifdef CONFIG_COMPAT
+static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
+#endif
+
+MODULE_AUTHOR("LSI Logic Corporation");
+MODULE_DESCRIPTION("LSI Logic Management Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LSI_COMMON_MOD_VERSION);
+
+static int dbglevel = CL_ANN;
+module_param_named(dlevel, dbglevel, int, 0);
+MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
+
+EXPORT_SYMBOL(mraid_mm_register_adp);
+EXPORT_SYMBOL(mraid_mm_unregister_adp);
+EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
+
+static uint32_t drvr_ver	= 0x02200207;
+
+static int adapters_count_g;
+static struct list_head adapters_list_g;
+
+static wait_queue_head_t wait_q;
+
+static const struct file_operations lsi_fops = {
+	.open	= mraid_mm_open,
+	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mraid_mm_compat_ioctl,
+#endif
+	.owner	= THIS_MODULE,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice megaraid_mm_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name   = "megadev0",
+	.fops   = &lsi_fops,
+};
+
+/**
+ * mraid_mm_open - open routine for char node interface
+ * @inode	: unused
+ * @filep	: unused
+ *
+ * Allow ioctl operations by apps only if they have superuser privilege.
+ */
+static int
+mraid_mm_open(struct inode *inode, struct file *filep)
+{
+	/*
+	 * Only allow superuser to access private ioctl interface
+	 */
+	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
+
+	return 0;
+}
+
+/**
+ * mraid_mm_ioctl - module entry-point for ioctls
+ * @inode	: inode (ignored)
+ * @filep	: file operations pointer (ignored)
+ * @cmd		: ioctl command
+ * @arg		: user ioctl packet
+ */
+static int
+mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	uioc_t		*kioc;
+	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
+	int		rval;
+	mraid_mmadp_t	*adp;
+	uint8_t		old_ioctl;
+	int		drvrcmd_rval;
+	void __user *argp = (void __user *)arg;
+
+	/*
+	 * Make sure only USCSICMD commands are issued through this interface;
+	 * a MIMD application could still fire a different command.
+	 */
+
+	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
+		return (-EINVAL);
+	}
+
+	/*
+	 * Look for signature to see if this is the new or old ioctl format.
+	 */
+	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid cmm: copy from usr addr failed\n"));
+		return (-EFAULT);
+	}
+
+	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
+		old_ioctl = 0;
+	else
+		old_ioctl = 1;
+
+	/*
+	 * At present, we don't support the new ioctl packet
+	 */
+	if (!old_ioctl )
+		return (-EINVAL);
+
+	/*
+	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
+	 * handle the command locally. rval > 0 means it is not a drvr cmd
+	 */
+	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
+
+	if (rval < 0)
+		return rval;
+	else if (rval == 0)
+		return drvrcmd_rval;
+
+	rval = 0;
+	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
+		return rval;
+	}
+
+	/*
+	 * Check if adapter can accept ioctl. We may have marked it offline
+	 * if any previous kioc had timed out on this controller.
+	 */
+	if (!adp->quiescent) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid cmm: controller cannot accept cmds due to "
+			"earlier errors\n" ));
+		return -EFAULT;
+	}
+
+	/*
+	 * The following call will block till a kioc is available, or return
+	 * NULL if the free list of the mraid_mmadp_t passed to
+	 * mraid_mm_alloc_kioc is empty
+	 */
+	kioc = mraid_mm_alloc_kioc(adp);
+	if (!kioc)
+		return -ENXIO;
+
+	/*
+	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
+	 */
+	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
+		mraid_mm_dealloc_kioc(adp, kioc);
+		return rval;
+	}
+
+	kioc->done = ioctl_done;
+
+	/*
+	 * Issue the IOCTL to the low level driver. After the IOCTL completes,
+	 * release the kioc if and only if it did _not_ time out. If it timed
+	 * out, the resources are still with the low level driver.
+	 */
+	if ((rval = lld_ioctl(adp, kioc))) {
+
+		if (!kioc->timedout)
+			mraid_mm_dealloc_kioc(adp, kioc);
+
+		return rval;
+	}
+
+	/*
+	 * Convert the kioc back to user space
+	 */
+	rval = kioc_to_mimd(kioc, argp);
+
+	/*
+	 * Return the kioc to free pool
+	 */
+	mraid_mm_dealloc_kioc(adp, kioc);
+
+	return rval;
+}
+
+static long
+mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
+		        unsigned long arg)
+{
+	int err;
+
+	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
+	mutex_lock(&mraid_mm_mutex);
+	err = mraid_mm_ioctl(filep, cmd, arg);
+	mutex_unlock(&mraid_mm_mutex);
+
+	return err;
+}
+
+/**
+ * mraid_mm_get_adapter - return the adapter corresponding to the mimd packet
+ * @umimd	: User space mimd_t ioctl packet
+ * @rval	: returned success/error status
+ *
+ * The function return value is a pointer to the located adapter, or NULL on
+ * error.
+ */
+static mraid_mmadp_t *
+mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
+{
+	mraid_mmadp_t	*adapter;
+	mimd_t		mimd;
+	uint32_t	adapno;
+	int		iterator;
+
+
+	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
+		*rval = -EFAULT;
+		return NULL;
+	}
+
+	adapno = GETADAP(mimd.ui.fcs.adapno);
+
+	if (adapno >= adapters_count_g) {
+		*rval = -ENODEV;
+		return NULL;
+	}
+
+	adapter = NULL;
+	iterator = 0;
+
+	list_for_each_entry(adapter, &adapters_list_g, list) {
+		if (iterator++ == adapno) break;
+	}
+
+	if (!adapter) {
+		*rval = -ENODEV;
+		return NULL;
+	}
+
+	return adapter;
+}
+
+/**
+ * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
+ * @arg		: packet sent by the user app
+ * @old_ioctl	: mimd if 1; uioc otherwise
+ * @rval	: pointer for command's returned value (not function status)
+ */
+static int
+handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
+{
+	mimd_t		__user *umimd;
+	mimd_t		kmimd;
+	uint8_t		opcode;
+	uint8_t		subopcode;
+
+	/*
+	 * At present only the old (MIMD) packet format is handled here; a
+	 * new-format packet is rejected outright.
+	 */
+	if (!old_ioctl)
+		return (-ENOTSUPP);
+
+	*rval = 0;
+	umimd = arg;
+
+	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
+		return (-EFAULT);
+
+	opcode		= kmimd.ui.fcs.opcode;
+	subopcode	= kmimd.ui.fcs.subopcode;
+
+	/*
+	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
+	 * GET_NUMADP, then we can handle it locally. Otherwise we return 1
+	 * to indicate that we cannot handle this command.
+	 */
+	if (opcode != 0x82)
+		return 1;
+
+	switch (subopcode) {
+
+	case MEGAIOC_QDRVRVER:
+
+		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
+			return (-EFAULT);
+
+		return 0;
+
+	case MEGAIOC_QNADAP:
+
+		*rval = adapters_count_g;
+
+		if (copy_to_user(kmimd.data, &adapters_count_g,
+				sizeof(uint32_t)))
+			return (-EFAULT);
+
+		return 0;
+
+	default:
+		/* cannot handle */
+		return 1;
+	}
+
+	return 0;
+}
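+
+/*
+ * Illustrative sketch (framing assumed; opcodes are those handled above): a
+ * MIMD application queries the number of adapters with opcode 0x82 and
+ * subopcode MEGAIOC_QNADAP, and handle_drvrcmd() copies adapters_count_g
+ * into the user buffer:
+ *
+ *	mimd.ui.fcs.opcode	= 0x82;
+ *	mimd.ui.fcs.subopcode	= MEGAIOC_QNADAP;
+ *	// after the ioctl, *(uint32_t *)mimd.data holds the adapter count
+ */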
+
+
+/**
+ * mimd_to_kioc	- Converter from old to new ioctl format
+ * @umimd	: user space old MIMD IOCTL
+ * @adp		: adapter softstate
+ * @kioc	: kernel space new format IOCTL
+ *
+ * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
+ * new packet is in kernel space so that the driver can perform operations on it
+ * freely.
+ */
+
+static int
+mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
+{
+	mbox64_t		*mbox64;
+	mbox_t			*mbox;
+	mraid_passthru_t	*pthru32;
+	uint32_t		adapno;
+	uint8_t			opcode;
+	uint8_t			subopcode;
+	mimd_t			mimd;
+
+	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
+		return (-EFAULT);
+
+	/*
+	 * Applications are not allowed to send extd pthru
+	 */
+	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
+			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
+		return (-EINVAL);
+
+	opcode		= mimd.ui.fcs.opcode;
+	subopcode	= mimd.ui.fcs.subopcode;
+	adapno		= GETADAP(mimd.ui.fcs.adapno);
+
+	if (adapno >= adapters_count_g)
+		return (-ENODEV);
+
+	kioc->adapno	= adapno;
+	kioc->mb_type	= MBOX_LEGACY;
+	kioc->app_type	= APPTYPE_MIMD;
+
+	switch (opcode) {
+
+	case 0x82:
+
+		if (subopcode == MEGAIOC_QADAPINFO) {
+
+			kioc->opcode	= GET_ADAP_INFO;
+			kioc->data_dir	= UIOC_RD;
+			kioc->xferlen	= sizeof(mraid_hba_info_t);
+
+			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+				return (-ENOMEM);
+		}
+		else {
+			con_log(CL_ANN, (KERN_WARNING
+					"megaraid cmm: Invalid subop\n"));
+			return (-EINVAL);
+		}
+
+		break;
+
+	case 0x81:
+
+		kioc->opcode		= MBOX_CMD;
+		kioc->xferlen		= mimd.ui.fcs.length;
+		kioc->user_data_len	= kioc->xferlen;
+		kioc->user_data		= mimd.ui.fcs.buffer;
+
+		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+			return (-ENOMEM);
+
+		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
+		if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+		break;
+
+	case 0x80:
+
+		kioc->opcode		= MBOX_CMD;
+		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
+						mimd.outlen : mimd.inlen;
+		kioc->user_data_len	= kioc->xferlen;
+		kioc->user_data		= mimd.data;
+
+		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
+			return (-ENOMEM);
+
+		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
+		if (mimd.inlen) kioc->data_dir |= UIOC_WR;
+
+		break;
+
+	default:
+		return (-EINVAL);
+	}
+
+	/*
+	 * If driver command, nothing else to do
+	 */
+	if (opcode == 0x82)
+		return 0;
+
+	/*
+	 * This is a mailbox cmd; copy the mailbox from mimd
+	 */
+	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
+	mbox	= &mbox64->mbox32;
+	memcpy(mbox, mimd.mbox, 14);
+
+	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD
+
+		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;
+
+		if (kioc->data_dir & UIOC_WR) {
+			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+							kioc->xferlen)) {
+				return (-EFAULT);
+			}
+		}
+
+		return 0;
+	}
+
+	/*
+	 * This is a regular 32-bit pthru cmd; mbox points to the pthru
+	 * struct. Just like in the case above, the beginning of the memblk
+	 * is treated as a mailbox. The passthru begins at the next 1K
+	 * boundary, and the data starts 1K after that.
+	 */
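+	/*
+	 * A rough picture of the layout described above (offsets are the
+	 * 1K steps the comment mentions, not exact structure sizes):
+	 *
+	 *	memblk + 0K:	mailbox (mbox64_t)
+	 *	memblk + 1K:	passthru (mraid_passthru_t)
+	 *	memblk + 2K:	data buffer
+	 */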
+	pthru32			= kioc->pthru32;
+	kioc->user_pthru	= &umimd->pthru;
+	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;
+
+	if (copy_from_user(pthru32, kioc->user_pthru,
+			sizeof(mraid_passthru_t))) {
+		return (-EFAULT);
+	}
+
+	pthru32->dataxferaddr	= kioc->buf_paddr;
+	if (kioc->data_dir & UIOC_WR) {
+		if (pthru32->dataxferlen > kioc->xferlen)
+			return -EINVAL;
+		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
+						pthru32->dataxferlen)) {
+			return (-EFAULT);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * mraid_mm_attach_buf - Attach a free dma buffer of the required size
+ * @adp		: Adapter softstate
+ * @kioc	: kioc that the buffer needs to be attached to
+ * @xferlen	: required length for buffer
+ *
+ * First we search for the pool with the smallest buffer that is >= @xferlen.
+ * If that pool has no free buffer, we try the next bigger size. If none is
+ * available, we allocate the smallest buffer that is >= @xferlen and attach
+ * it to the kioc.
+ */
+static int
+mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
+{
+	mm_dmapool_t	*pool;
+	int		right_pool = -1;
+	unsigned long	flags;
+	int		i;
+
+	kioc->pool_index	= -1;
+	kioc->buf_vaddr		= NULL;
+	kioc->buf_paddr		= 0;
+	kioc->free_buf		= 0;
+
+	/*
+	 * We need xferlen bytes of memory. See if we can get it from our
+	 * dma pools. If we don't find the exact size, we try a bigger buffer.
+	 */
+
+	for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+		pool = &adp->dma_pool_list[i];
+
+		if (xferlen > pool->buf_size)
+			continue;
+
+		if (right_pool == -1)
+			right_pool = i;
+
+		spin_lock_irqsave(&pool->lock, flags);
+
+		if (!pool->in_use) {
+
+			pool->in_use		= 1;
+			kioc->pool_index	= i;
+			kioc->buf_vaddr		= pool->vaddr;
+			kioc->buf_paddr		= pool->paddr;
+
+			spin_unlock_irqrestore(&pool->lock, flags);
+			return 0;
+		}
+		else {
+			spin_unlock_irqrestore(&pool->lock, flags);
+			continue;
+		}
+	}
+
+	/*
+	 * If xferlen doesn't match any of our pools, return error
+	 */
+	if (right_pool == -1)
+		return -EINVAL;
+
+	/*
+	 * We did not get any buffer from the preallocated pool. Let us try
+	 * to allocate one new buffer. NOTE: this runs under the pool lock,
+	 * so the allocation below must be non-blocking (GFP_ATOMIC).
+	 */
+	pool = &adp->dma_pool_list[right_pool];
+
+	spin_lock_irqsave(&pool->lock, flags);
+
+	kioc->pool_index	= right_pool;
+	kioc->free_buf		= 1;
+	kioc->buf_vaddr		= dma_pool_alloc(pool->handle, GFP_ATOMIC,
+							&kioc->buf_paddr);
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (!kioc->buf_vaddr)
+		return -ENOMEM;
+
+	return 0;
+}
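+
+/*
+ * Illustrative sketch, not part of the original source: assuming pools
+ * of 4k/8k/16k/32k/64k, a 10000-byte request skips the 4k and 8k pools
+ * and "right_pool" settles on the 16k pool:
+ */
+#if 0
+	if (mraid_mm_attach_buf(adp, kioc, 10000) == 0) {
+		/*
+		 * kioc->pool_index == 2 (the 16k pool); kioc->free_buf is
+		 * set only if the preallocated 16k buffer was in use and a
+		 * fresh buffer had to come from dma_pool_alloc()
+		 */
+	}
+#endif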
+
+/**
+ * mraid_mm_alloc_kioc - Returns a uioc_t from free list
+ * @adp	: Adapter softstate for this module
+ *
+ * The kioc_semaphore is initialized with number of kioc nodes in the
+ * free kioc pool. If the kioc pool is empty, this function blocks till
+ * a kioc becomes free.
+ */
+static uioc_t *
+mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
+{
+	uioc_t			*kioc;
+	struct list_head	*head;
+	unsigned long		flags;
+
+	down(&adp->kioc_semaphore);
+
+	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+
+	head = &adp->kioc_pool;
+
+	if (list_empty(head)) {
+		up(&adp->kioc_semaphore);
+		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
+		return NULL;
+	}
+
+	kioc = list_entry(head->next, uioc_t, list);
+	list_del_init(&kioc->list);
+
+	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
+	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
+
+	kioc->buf_vaddr		= NULL;
+	kioc->buf_paddr		= 0;
+	kioc->pool_index	= -1;
+	kioc->free_buf		= 0;
+	kioc->user_data		= NULL;
+	kioc->user_data_len	= 0;
+	kioc->user_pthru	= NULL;
+	kioc->timedout		= 0;
+
+	return kioc;
+}
+
+/**
+ * mraid_mm_dealloc_kioc - Return kioc to free pool
+ * @adp		: Adapter softstate
+ * @kioc	: uioc_t node to be returned to free pool
+ */
+static void
+mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+	mm_dmapool_t	*pool;
+	unsigned long	flags;
+
+	if (kioc->pool_index != -1) {
+		pool = &adp->dma_pool_list[kioc->pool_index];
+
+		/* This routine may be called in non-isr context also */
+		spin_lock_irqsave(&pool->lock, flags);
+
+		/*
+		 * While attaching the dma buffer, if we didn't get the
+		 * required buffer from the pool, we would have allocated
+		 * it at run time and set the free_buf flag. We must free
+		 * that buffer here. Otherwise, just mark the pool buffer
+		 * as no longer in use.
+		 */
+		if (kioc->free_buf == 1)
+			dma_pool_free(pool->handle, kioc->buf_vaddr, 
+							kioc->buf_paddr);
+		else
+			pool->in_use = 0;
+
+		spin_unlock_irqrestore(&pool->lock, flags);
+	}
+
+	/* Return the kioc to the free pool */
+	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
+	list_add(&kioc->list, &adp->kioc_pool);
+	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
+
+	/* increment the free kioc count */
+	up(&adp->kioc_semaphore);
+
+	return;
+}
+
+/**
+ * lld_ioctl - Routine to issue ioctl to low level drvr
+ * @adp		: The adapter handle
+ * @kioc	: The ioctl packet with kernel addresses
+ */
+static int
+lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
+{
+	int			rval;
+	struct timer_list	timer;
+	struct timer_list	*tp = NULL;
+
+	kioc->status	= -ENODATA;
+	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
+
+	if (rval) return rval;
+
+	/*
+	 * Start the timer
+	 */
+	if (adp->timeout > 0) {
+		tp		= &timer;
+		init_timer(tp);
+
+		tp->function	= lld_timedout;
+		tp->data	= (unsigned long)kioc;
+		tp->expires	= jiffies + adp->timeout * HZ;
+
+		add_timer(tp);
+	}
+
+	/*
+	 * Wait till the low level driver completes the ioctl. After this
+	 * call, the ioctl has either completed successfully or timed out.
+	 */
+	wait_event(wait_q, (kioc->status != -ENODATA));
+	if (tp) {
+		del_timer_sync(tp);
+	}
+
+	/*
+	 * If the command timed out, we mark the controller offline
+	 * before returning.
+	 */
+	if (kioc->timedout) {
+		adp->quiescent = 0;
+	}
+
+	return kioc->status;
+}
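+
+/*
+ * Illustrative note, not part of the original source: -ENODATA doubles
+ * as the "still running" sentinel, so the contract between lld_ioctl()
+ * and the completion callback is roughly:
+ *
+ *	lld_ioctl():	kioc->status = -ENODATA;
+ *			issue_uioc(...);
+ *			wait_event(wait_q, kioc->status != -ENODATA);
+ *
+ *	ioctl_done():	kioc->status = <real result>;
+ *			wake_up(&wait_q);
+ */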
+
+
+/**
+ * ioctl_done - callback from the low level driver
+ * @kioc	: completed ioctl packet
+ */
+static void
+ioctl_done(uioc_t *kioc)
+{
+	uint32_t	adapno;
+	int		iterator;
+	mraid_mmadp_t	*adapter;
+
+	/*
+	 * When the kioc returns from the driver, make sure its status is
+	 * no longer -ENODATA. Otherwise, the driver would hang on
+	 * wait_event forever.
+	 */
+	if (kioc->status == -ENODATA) {
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid cmm: lld didn't change status!\n"));
+
+		kioc->status = -EINVAL;
+	}
+
+	/*
+	 * Check if this kioc timed out earlier. If so, nobody is waiting
+	 * on it, so there is no one to wake up; we just have to free the
+	 * kioc.
+	 */
+	if (kioc->timedout) {
+		iterator	= 0;
+		adapter		= NULL;
+		adapno		= kioc->adapno;
+
+		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
+					"ioctl that timed out earlier\n"));
+
+		list_for_each_entry(adapter, &adapters_list_g, list) {
+			if (iterator++ == adapno) break;
+		}
+
+		kioc->timedout = 0;
+
+		if (adapter) {
+			mraid_mm_dealloc_kioc(adapter, kioc);
+		}
+	}
+	else {
+		wake_up(&wait_q);
+	}
+}
+
+
+/**
+ * lld_timedout	- callback from the expired timer
+ * @ptr		: ioctl packet that timed out
+ */
+static void
+lld_timedout(unsigned long ptr)
+{
+	uioc_t *kioc	= (uioc_t *)ptr;
+
+	kioc->status 	= -ETIME;
+	kioc->timedout	= 1;
+
+	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
+
+	wake_up(&wait_q);
+}
+
+
+/**
+ * kioc_to_mimd	- Converter from new back to old format
+ * @kioc	: Kernel space IOCTL packet (successfully issued)
+ * @mimd	: User space MIMD packet
+ */
+static int
+kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
+{
+	mimd_t			kmimd;
+	uint8_t			opcode;
+	uint8_t			subopcode;
+
+	mbox64_t		*mbox64;
+	mraid_passthru_t	__user *upthru32;
+	mraid_passthru_t	*kpthru32;
+	mcontroller_t		cinfo;
+	mraid_hba_info_t	*hinfo;
+
+
+	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
+		return (-EFAULT);
+
+	opcode		= kmimd.ui.fcs.opcode;
+	subopcode	= kmimd.ui.fcs.subopcode;
+
+	if (opcode == 0x82) {
+		switch (subopcode) {
+
+		case MEGAIOC_QADAPINFO:
+
+			hinfo = (mraid_hba_info_t *)(unsigned long)
+					kioc->buf_vaddr;
+
+			hinfo_to_cinfo(hinfo, &cinfo);
+
+			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
+				return (-EFAULT);
+
+			return 0;
+
+		default:
+			return (-EINVAL);
+		}
+
+		return 0;
+	}
+
+	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
+
+	if (kioc->user_pthru) {
+
+		upthru32 = kioc->user_pthru;
+		kpthru32 = kioc->pthru32;
+
+		if (copy_to_user(&upthru32->scsistatus,
+					&kpthru32->scsistatus,
+					sizeof(uint8_t))) {
+			return (-EFAULT);
+		}
+	}
+
+	if (kioc->user_data) {
+		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
+					kioc->user_data_len)) {
+			return (-EFAULT);
+		}
+	}
+
+	if (copy_to_user(&mimd->mbox[17],
+			&mbox64->mbox32.status, sizeof(uint8_t))) {
+		return (-EFAULT);
+	}
+
+	return 0;
+}
+
+
+/**
+ * hinfo_to_cinfo - Convert new format hba info into old format
+ * @hinfo	: New format, more comprehensive adapter info
+ * @cinfo	: Old format adapter info to support mimd_t apps
+ */
+static void
+hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
+{
+	if (!hinfo || !cinfo)
+		return;
+
+	cinfo->base		= hinfo->baseport;
+	cinfo->irq		= hinfo->irq;
+	cinfo->numldrv		= hinfo->num_ldrv;
+	cinfo->pcibus		= hinfo->pci_bus;
+	cinfo->pcidev		= hinfo->pci_slot;
+	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
+	cinfo->pciid		= hinfo->pci_device_id;
+	cinfo->pcivendor	= hinfo->pci_vendor_id;
+	cinfo->pcislot		= hinfo->pci_slot;
+	cinfo->uid		= hinfo->unique_id;
+}
+
+
+/**
+ * mraid_mm_register_adp - Registration routine for low level drivers
+ * @lld_adp	: Adapter object
+ */
+int
+mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
+{
+	mraid_mmadp_t	*adapter;
+	mbox64_t	*mbox_list;
+	uioc_t		*kioc;
+	uint32_t	rval;
+	int		i;
+
+
+	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
+		return (-EINVAL);
+
+	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
+
+	if (!adapter)
+		return -ENOMEM;
+
+
+	adapter->unique_id	= lld_adp->unique_id;
+	adapter->drvr_type	= lld_adp->drvr_type;
+	adapter->drvr_data	= lld_adp->drvr_data;
+	adapter->pdev		= lld_adp->pdev;
+	adapter->issue_uioc	= lld_adp->issue_uioc;
+	adapter->timeout	= lld_adp->timeout;
+	adapter->max_kioc	= lld_adp->max_kioc;
+	adapter->quiescent	= 1;
+
+	/*
+	 * Allocate single blocks of memory for all required kiocs,
+	 * mailboxes and passthru structures.
+	 */
+	adapter->kioc_list	= kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
+						GFP_KERNEL);
+	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
+						GFP_KERNEL);
+	adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
+						&adapter->pdev->dev,
+						sizeof(mraid_passthru_t),
+						16, 0);
+
+	if (!adapter->kioc_list || !adapter->mbox_list ||
+			!adapter->pthru_dma_pool) {
+
+		con_log(CL_ANN, (KERN_WARNING
+			"megaraid cmm: out of memory, %s %d\n", __func__,
+			__LINE__));
+
+		rval = (-ENOMEM);
+
+		goto memalloc_error;
+	}
+
+	/*
+	 * Slice kioc_list and make a kioc_pool with the individual kiocs
+	 */
+	INIT_LIST_HEAD(&adapter->kioc_pool);
+	spin_lock_init(&adapter->kioc_pool_lock);
+	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
+
+	mbox_list	= (mbox64_t *)adapter->mbox_list;
+
+	for (i = 0; i < lld_adp->max_kioc; i++) {
+
+		kioc		= adapter->kioc_list + i;
+		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
+		kioc->pthru32	= dma_pool_alloc(adapter->pthru_dma_pool,
+						GFP_KERNEL, &kioc->pthru32_h);
+
+		if (!kioc->pthru32) {
+
+			con_log(CL_ANN, (KERN_WARNING
+				"megaraid cmm: out of memory, %s %d\n",
+					__func__, __LINE__));
+
+			rval = (-ENOMEM);
+
+			goto pthru_dma_pool_error;
+		}
+
+		list_add_tail(&kioc->list, &adapter->kioc_pool);
+	}
+
+	// Setup the dma pools for data buffers
+	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
+		goto dma_pool_error;
+	}
+
+	list_add_tail(&adapter->list, &adapters_list_g);
+
+	adapters_count_g++;
+
+	return 0;
+
+dma_pool_error:
+	/* Do nothing */
+
+pthru_dma_pool_error:
+
+	for (i = 0; i < lld_adp->max_kioc; i++) {
+		kioc = adapter->kioc_list + i;
+		if (kioc->pthru32) {
+			dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
+				kioc->pthru32_h);
+		}
+	}
+
+memalloc_error:
+
+	kfree(adapter->kioc_list);
+	kfree(adapter->mbox_list);
+
+	if (adapter->pthru_dma_pool)
+		dma_pool_destroy(adapter->pthru_dma_pool);
+
+	kfree(adapter);
+
+	return rval;
+}
+
+
+/**
+ * mraid_mm_adapter_app_handle - return the application handle for this adapter
+ * @unique_id	: adapter unique identifier
+ *
+ * For the given unique id, locate the adapter in our global list and
+ * return the corresponding handle, which is also used by applications to
+ * uniquely identify an adapter.
+ *
+ * Return the adapter handle if it is found in the list.
+ * Return 0 if the adapter could not be located (this should never happen).
+ */
+uint32_t
+mraid_mm_adapter_app_handle(uint32_t unique_id)
+{
+	mraid_mmadp_t	*adapter;
+	mraid_mmadp_t	*tmp;
+	int		index = 0;
+
+	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+		if (adapter->unique_id == unique_id) {
+
+			return MKADAP(index);
+		}
+
+		index++;
+	}
+
+	return 0;
+}
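+
+/*
+ * Illustrative note, assuming the usual MKADAP()/GETADAP() pair from
+ * megaraid_ioctl.h: the handle returned here is just the list index
+ * tagged with the driver type, so GETADAP(MKADAP(index)) == index,
+ * which is how mimd_to_kioc() recovers the adapter number above.
+ */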
+
+
+/**
+ * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
+ * @adp	: Adapter softstate
+ *
+ * We maintain a set of dma buffer pools per adapter. Each pool holds one
+ * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k
+ * buffers: just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool
+ * and so on. We don't want to waste too much memory by allocating more
+ * buffers per pool.
+ */
+static int
+mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
+{
+	mm_dmapool_t	*pool;
+	int		bufsize;
+	int		i;
+
+	/*
+	 * Create MAX_DMA_POOLS number of pools
+	 */
+	bufsize = MRAID_MM_INIT_BUFF_SIZE;
+
+	for (i = 0; i < MAX_DMA_POOLS; i++){
+
+		pool = &adp->dma_pool_list[i];
+
+		pool->buf_size = bufsize;
+		spin_lock_init(&pool->lock);
+
+		pool->handle = dma_pool_create("megaraid mm data buffer",
+						&adp->pdev->dev, bufsize,
+						16, 0);
+
+		if (!pool->handle) {
+			goto dma_pool_setup_error;
+		}
+
+		pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
+							&pool->paddr);
+
+		if (!pool->vaddr)
+			goto dma_pool_setup_error;
+
+		bufsize = bufsize * 2;
+	}
+
+	return 0;
+
+dma_pool_setup_error:
+
+	mraid_mm_teardown_dma_pools(adp);
+	return (-ENOMEM);
+}
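+
+/*
+ * Illustrative sketch, not part of the original source: starting from
+ * MRAID_MM_INIT_BUFF_SIZE (4096) and doubling each iteration, an
+ * adapter with, say, MAX_DMA_POOLS == 5 ends up with one buffer in each
+ * of five pools:
+ *
+ *	pool 0:  4096 bytes
+ *	pool 1:  8192 bytes
+ *	pool 2: 16384 bytes
+ *	pool 3: 32768 bytes
+ *	pool 4: 65536 bytes
+ */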
+
+
+/**
+ * mraid_mm_unregister_adp - Unregister routine for low level drivers
+ * @unique_id	: UID of the adapter
+ *
+ * Assumes no outstanding ioctls to llds.
+ */
+int
+mraid_mm_unregister_adp(uint32_t unique_id)
+{
+	mraid_mmadp_t	*adapter;
+	mraid_mmadp_t	*tmp;
+
+	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
+
+
+		if (adapter->unique_id == unique_id) {
+
+			adapters_count_g--;
+
+			list_del_init(&adapter->list);
+
+			mraid_mm_free_adp_resources(adapter);
+
+			kfree(adapter);
+
+			con_log(CL_ANN, (
+				"megaraid cmm: Unregistered one adapter:%#x\n",
+				unique_id));
+
+			return 0;
+		}
+	}
+
+	return (-ENODEV);
+}
+
+/**
+ * mraid_mm_free_adp_resources - Free adapter softstate
+ * @adp	: Adapter softstate
+ */
+static void
+mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
+{
+	uioc_t	*kioc;
+	int	i;
+
+	mraid_mm_teardown_dma_pools(adp);
+
+	for (i = 0; i < adp->max_kioc; i++) {
+
+		kioc = adp->kioc_list + i;
+
+		dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
+				kioc->pthru32_h);
+	}
+
+	kfree(adp->kioc_list);
+	kfree(adp->mbox_list);
+
+	dma_pool_destroy(adp->pthru_dma_pool);
+
+
+	return;
+}
+
+
+/**
+ * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
+ * @adp	: Adapter softstate
+ */
+static void
+mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
+{
+	int		i;
+	mm_dmapool_t	*pool;
+
+	for (i = 0; i < MAX_DMA_POOLS; i++) {
+
+		pool = &adp->dma_pool_list[i];
+
+		if (pool->handle) {
+
+			if (pool->vaddr)
+				dma_pool_free(pool->handle, pool->vaddr,
+							pool->paddr);
+
+			dma_pool_destroy(pool->handle);
+			pool->handle = NULL;
+		}
+	}
+
+	return;
+}
+
+/**
+ * mraid_mm_init	- Module entry point
+ */
+static int __init
+mraid_mm_init(void)
+{
+	int err;
+
+	// Announce the driver version
+	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
+		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
+
+	err = misc_register(&megaraid_mm_dev);
+	if (err < 0) {
+		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
+		return err;
+	}
+
+	init_waitqueue_head(&wait_q);
+
+	INIT_LIST_HEAD(&adapters_list_g);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_COMPAT
+/**
+ * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
+ * @filep	: file operations pointer (ignored)
+ * @cmd		: ioctl command
+ * @arg		: user ioctl packet
+ */
+static long
+mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
+		      unsigned long arg)
+{
+	int err;
+
+	err = mraid_mm_ioctl(filep, cmd, arg);
+
+	return err;
+}
+#endif
+
+/**
+ * mraid_mm_exit	- Module exit point
+ */
+static void __exit
+mraid_mm_exit(void)
+{
+	con_log(CL_DLEVEL1, ("exiting common mod\n"));
+
+	misc_deregister(&megaraid_mm_dev);
+}
+
+module_init(mraid_mm_init);
+module_exit(mraid_mm_exit);
+
+/* vi: set ts=8 sw=8 tw=78: */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.h
new file mode 100644
index 0000000..a30e725
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_mm.h
@@ -0,0 +1,101 @@
+/*
+ *
+ *			Linux MegaRAID device driver
+ *
+ * Copyright (c) 2003-2004  LSI Logic Corporation.
+ *
+ *	   This program is free software; you can redistribute it and/or
+ *	   modify it under the terms of the GNU General Public License
+ *	   as published by the Free Software Foundation; either version
+ *	   2 of the License, or (at your option) any later version.
+ *
+ * FILE		: megaraid_mm.h
+ */
+
+#ifndef MEGARAID_MM_H
+#define MEGARAID_MM_H
+
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+
+#include "mbox_defs.h"
+#include "megaraid_ioctl.h"
+
+
+#define LSI_COMMON_MOD_VERSION	"2.20.2.7"
+#define LSI_COMMON_MOD_EXT_VERSION	\
+		"(Release Date: Sun Jul 16 00:01:03 EST 2006)"
+
+
+#define LSI_DBGLVL			dbglevel
+
+// The smallest dma pool
+#define MRAID_MM_INIT_BUFF_SIZE		4096
+
+/**
+ * mimd_t	: Old style ioctl packet structure (deprecated)
+ *
+ * @inlen	: length of data sent to the adapter (UIOC_WR direction)
+ * @outlen	: length of data received from the adapter (UIOC_RD direction)
+ * @fca		: raw byte view of the opcode fields below
+ * @opcode	: command opcode (0x80, 0x81 or 0x82 for driver commands)
+ * @subopcode	: sub-opcode for 0x82 driver commands
+ * @adapno	: adapter number the packet is addressed to
+ * @buffer	: user data buffer for 0x81 commands
+ * @pad		: pads the buffer pointer to 8 bytes on 32-bit platforms
+ * @length	: length of the user data buffer for 0x81 commands
+ * @mbox	: the mailbox for the command
+ * @pthru	: embedded passthru structure
+ * @data	: user data buffer (<= 4096 bytes) for 0x80 commands
+ * @pad		: pads the data pointer to 8 bytes on 32-bit platforms
+ *
+ * Note		: This structure is DEPRECATED. New applications must use
+ *		: uioc_t structure instead. All new hba drivers use the new
+ *		: format. If we get this mimd packet, we will convert it into
+ *		: new uioc_t format and send it to the hba drivers.
+ */
+
+typedef struct mimd {
+
+	uint32_t inlen;
+	uint32_t outlen;
+
+	union {
+		uint8_t fca[16];
+		struct {
+			uint8_t opcode;
+			uint8_t subopcode;
+			uint16_t adapno;
+#if BITS_PER_LONG == 32
+			uint8_t __user *buffer;
+			uint8_t pad[4];
+#endif
+#if BITS_PER_LONG == 64
+			uint8_t __user *buffer;
+#endif
+			uint32_t length;
+		} __attribute__ ((packed)) fcs;
+	} __attribute__ ((packed)) ui;
+
+	uint8_t mbox[18];		/* 16 bytes + 2 status bytes */
+	mraid_passthru_t pthru;
+
+#if BITS_PER_LONG == 32
+	char __user *data;		/* buffer <= 4096 for 0x80 commands */
+	char pad[4];
+#endif
+#if BITS_PER_LONG == 64
+	char __user *data;
+#endif
+
+} __attribute__ ((packed)) mimd_t;
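+
+/*
+ * Illustrative sketch, not part of the original header: an old-style
+ * application querying the adapter count would fill the packet roughly
+ * as below. "fd" is a hypothetical descriptor for the megadev node and
+ * "MEGAIOC_CMD" stands in for whatever ioctl number the application
+ * uses; MEGAIOC_QNADAP is the subopcode handled by handle_drvrcmd().
+ */
+#if 0
+	mimd_t		mimd;
+	uint32_t	nadap;
+
+	memset(&mimd, 0, sizeof(mimd));
+	mimd.ui.fcs.opcode	= 0x82;
+	mimd.ui.fcs.subopcode	= MEGAIOC_QNADAP;
+	mimd.data		= (char *)&nadap;
+
+	ioctl(fd, MEGAIOC_CMD, &mimd);	/* nadap now holds the count */
+#endif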
+
+#endif // MEGARAID_MM_H
+
+// vi: set ts=8 sw=8 tw=78:
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 0000000..81de4a1
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,2499 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2003-2013  LSI Corporation
+ *  Copyright (c) 2013-2014  Avago Technologies
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  FILE: megaraid_sas.h
+ *
+ *  Authors: Avago Technologies
+ *           Kashyap Desai <kashyap.desai@avagotech.com>
+ *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ *  San Jose, California 95131
+ */
+
+#ifndef LSI_MEGARAID_SAS_H
+#define LSI_MEGARAID_SAS_H
+
+/*
+ * MegaRAID SAS Driver meta data
+ */
+#define MEGASAS_VERSION				"07.702.06.00-rc1"
+#define MEGASAS_RELDATE				"June 21, 2017"
+
+/*
+ * Device IDs
+ */
+#define	PCI_DEVICE_ID_LSI_SAS1078R		0x0060
+#define	PCI_DEVICE_ID_LSI_SAS1078DE		0x007C
+#define	PCI_DEVICE_ID_LSI_VERDE_ZCR		0x0413
+#define	PCI_DEVICE_ID_LSI_SAS1078GEN2		0x0078
+#define	PCI_DEVICE_ID_LSI_SAS0079GEN2		0x0079
+#define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
+#define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
+#define	PCI_DEVICE_ID_LSI_FUSION		0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA		0x002f
+#define PCI_DEVICE_ID_LSI_INVADER		0x005d
+#define PCI_DEVICE_ID_LSI_FURY			0x005f
+#define PCI_DEVICE_ID_LSI_INTRUDER		0x00ce
+#define PCI_DEVICE_ID_LSI_INTRUDER_24		0x00cf
+#define PCI_DEVICE_ID_LSI_CUTLASS_52		0x0052
+#define PCI_DEVICE_ID_LSI_CUTLASS_53		0x0053
+#define PCI_DEVICE_ID_LSI_VENTURA		    0x0014
+#define PCI_DEVICE_ID_LSI_HARPOON		    0x0016
+#define PCI_DEVICE_ID_LSI_TOMCAT		    0x0017
+#define PCI_DEVICE_ID_LSI_VENTURA_4PORT		0x001B
+#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT	0x001C
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MEGARAID_INTEL_RS3DC080_SSDID		0x9360
+#define MEGARAID_INTEL_RS3DC040_SSDID		0x9362
+#define MEGARAID_INTEL_RS3SC008_SSDID		0x9380
+#define MEGARAID_INTEL_RS3MC044_SSDID		0x9381
+#define MEGARAID_INTEL_RS3WC080_SSDID		0x9341
+#define MEGARAID_INTEL_RS3WC040_SSDID		0x9343
+#define MEGARAID_INTEL_RMS3BC160_SSDID		0x352B
+
+/*
+ * Intruder HBA SSDIDs
+ */
+#define MEGARAID_INTRUDER_SSDID1		0x9371
+#define MEGARAID_INTRUDER_SSDID2		0x9390
+#define MEGARAID_INTRUDER_SSDID3		0x9370
+
+/*
+ * Intel HBA branding
+ */
+#define MEGARAID_INTEL_RS3DC080_BRANDING	\
+	"Intel(R) RAID Controller RS3DC080"
+#define MEGARAID_INTEL_RS3DC040_BRANDING	\
+	"Intel(R) RAID Controller RS3DC040"
+#define MEGARAID_INTEL_RS3SC008_BRANDING	\
+	"Intel(R) RAID Controller RS3SC008"
+#define MEGARAID_INTEL_RS3MC044_BRANDING	\
+	"Intel(R) RAID Controller RS3MC044"
+#define MEGARAID_INTEL_RS3WC080_BRANDING	\
+	"Intel(R) RAID Controller RS3WC080"
+#define MEGARAID_INTEL_RS3WC040_BRANDING	\
+	"Intel(R) RAID Controller RS3WC040"
+#define MEGARAID_INTEL_RMS3BC160_BRANDING	\
+	"Intel(R) Integrated RAID Module RMS3BC160"
+
+/*
+ * =====================================
+ * MegaRAID SAS MFI firmware definitions
+ * =====================================
+ */
+
+/*
+ * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
+ * the protocol between the software and the firmware. Commands are issued
+ * using "message frames".
+ */
+
+/*
+ * FW posts its state in upper 4 bits of outbound_msg_0 register
+ */
+#define MFI_STATE_MASK				0xF0000000
+#define MFI_STATE_UNDEFINED			0x00000000
+#define MFI_STATE_BB_INIT			0x10000000
+#define MFI_STATE_FW_INIT			0x40000000
+#define MFI_STATE_WAIT_HANDSHAKE		0x60000000
+#define MFI_STATE_FW_INIT_2			0x70000000
+#define MFI_STATE_DEVICE_SCAN			0x80000000
+#define MFI_STATE_BOOT_MESSAGE_PENDING		0x90000000
+#define MFI_STATE_FLUSH_CACHE			0xA0000000
+#define MFI_STATE_READY				0xB0000000
+#define MFI_STATE_OPERATIONAL			0xC0000000
+#define MFI_STATE_FAULT				0xF0000000
+#define MFI_STATE_FORCE_OCR			0x00000080
+#define MFI_STATE_DMADONE			0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE		0x00000004
+#define MFI_RESET_REQUIRED			0x00000001
+#define MFI_RESET_ADAPTER			0x00000002
+#define MEGAMFI_FRAME_SIZE			64
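+
+/*
+ * Illustrative sketch, not part of the original header: since the state
+ * lives in the upper 4 bits, readers mask the register first, e.g.
+ * ("regs" standing for the mapped megasas_register_set):
+ *
+ *	fw_state = readl(&regs->outbound_msg_0) & MFI_STATE_MASK;
+ *	if (fw_state == MFI_STATE_FAULT)
+ *		...;
+ */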
+
+/*
+ * During FW init, clear pending cmds & reset state using inbound_msg_0
+ *
+ * ABORT	: Abort all pending cmds
+ * READY	: Move from OPERATIONAL to READY state; discard queue info
+ * MFIMODE	: Discard (possible) low MFA posted in 64-bit mode (??)
+ * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
+ * HOTPLUG	: Resume from Hotplug
+ * MFI_STOP_ADP	: Send signal to FW to stop processing
+ */
+#define WRITE_SEQUENCE_OFFSET		(0x0000000FC) /* I20 */
+#define HOST_DIAGNOSTIC_OFFSET		(0x000000F8)  /* I20 */
+#define DIAG_WRITE_ENABLE			(0x00000080)
+#define DIAG_RESET_ADAPTER			(0x00000004)
+
+#define MFI_ADP_RESET				0x00000040
+#define MFI_INIT_ABORT				0x00000001
+#define MFI_INIT_READY				0x00000002
+#define MFI_INIT_MFIMODE			0x00000004
+#define MFI_INIT_CLEAR_HANDSHAKE		0x00000008
+#define MFI_INIT_HOTPLUG			0x00000010
+#define MFI_STOP_ADP				0x00000020
+#define MFI_RESET_FLAGS				MFI_INIT_READY| \
+						MFI_INIT_MFIMODE| \
+						MFI_INIT_ABORT
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE    (0x01)
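+
+/*
+ * Note, not part of the original header: MFI_RESET_FLAGS expands to an
+ * unparenthesized OR of three flags. An expression such as
+ * (status & MFI_RESET_FLAGS) therefore masks only MFI_INIT_READY and
+ * ORs the other two flags in unconditionally; wrap the macro in
+ * parentheses (or assign it to a variable) before combining it with
+ * higher-precedence operators. Plain uses like
+ * writel(MFI_RESET_FLAGS, reg) are unaffected.
+ */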
+
+/*
+ * MFI frame flags
+ */
+#define MFI_FRAME_POST_IN_REPLY_QUEUE		0x0000
+#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE	0x0001
+#define MFI_FRAME_SGL32				0x0000
+#define MFI_FRAME_SGL64				0x0002
+#define MFI_FRAME_SENSE32			0x0000
+#define MFI_FRAME_SENSE64			0x0004
+#define MFI_FRAME_DIR_NONE			0x0000
+#define MFI_FRAME_DIR_WRITE			0x0008
+#define MFI_FRAME_DIR_READ			0x0010
+#define MFI_FRAME_DIR_BOTH			0x0018
+#define MFI_FRAME_IEEE                          0x0020
+
+/* Driver internal */
+#define DRV_DCMD_POLLED_MODE		0x1
+#define DRV_DCMD_SKIP_REFIRE		0x2
+
+/*
+ * Definition for cmd_status
+ */
+#define MFI_CMD_STATUS_POLL_MODE		0xFF
+
+/*
+ * MFI command opcodes
+ */
+#define MFI_CMD_INIT				0x00
+#define MFI_CMD_LD_READ				0x01
+#define MFI_CMD_LD_WRITE			0x02
+#define MFI_CMD_LD_SCSI_IO			0x03
+#define MFI_CMD_PD_SCSI_IO			0x04
+#define MFI_CMD_DCMD				0x05
+#define MFI_CMD_ABORT				0x06
+#define MFI_CMD_SMP				0x07
+#define MFI_CMD_STP				0x08
+#define MFI_CMD_INVALID				0xff
+
+#define MR_DCMD_CTRL_GET_INFO			0x01010000
+#define MR_DCMD_LD_GET_LIST			0x03010000
+#define MR_DCMD_LD_LIST_QUERY			0x03010100
+
+#define MR_DCMD_CTRL_CACHE_FLUSH		0x01101000
+#define MR_FLUSH_CTRL_CACHE			0x01
+#define MR_FLUSH_DISK_CACHE			0x02
+
+#define MR_DCMD_CTRL_SHUTDOWN			0x01050000
+#define MR_DCMD_HIBERNATE_SHUTDOWN		0x01060000
+#define MR_ENABLE_DRIVE_SPINDOWN		0x01
+
+#define MR_DCMD_CTRL_EVENT_GET_INFO		0x01040100
+#define MR_DCMD_CTRL_EVENT_GET			0x01040300
+#define MR_DCMD_CTRL_EVENT_WAIT			0x01040500
+#define MR_DCMD_LD_GET_PROPERTIES		0x03030000
+
+#define MR_DCMD_CLUSTER				0x08000000
+#define MR_DCMD_CLUSTER_RESET_ALL		0x08010100
+#define MR_DCMD_CLUSTER_RESET_LD		0x08010200
+#define MR_DCMD_PD_LIST_QUERY                   0x02010100
+
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS	0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE	(0xF0010000 | 0x0600)
+#define MR_DCMD_PD_GET_INFO			0x02020000
+
+/*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
+
+/*
+ * MFI command completion codes
+ */
+enum MFI_STAT {
+	MFI_STAT_OK = 0x00,
+	MFI_STAT_INVALID_CMD = 0x01,
+	MFI_STAT_INVALID_DCMD = 0x02,
+	MFI_STAT_INVALID_PARAMETER = 0x03,
+	MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
+	MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
+	MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
+	MFI_STAT_APP_IN_USE = 0x07,
+	MFI_STAT_APP_NOT_INITIALIZED = 0x08,
+	MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
+	MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
+	MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
+	MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
+	MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
+	MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
+	MFI_STAT_FLASH_BUSY = 0x0f,
+	MFI_STAT_FLASH_ERROR = 0x10,
+	MFI_STAT_FLASH_IMAGE_BAD = 0x11,
+	MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
+	MFI_STAT_FLASH_NOT_OPEN = 0x13,
+	MFI_STAT_FLASH_NOT_STARTED = 0x14,
+	MFI_STAT_FLUSH_FAILED = 0x15,
+	MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
+	MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
+	MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
+	MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
+	MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
+	MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
+	MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
+	MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
+	MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
+	MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
+	MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+	MFI_STAT_MFC_HW_ERROR = 0x21,
+	MFI_STAT_NO_HW_PRESENT = 0x22,
+	MFI_STAT_NOT_FOUND = 0x23,
+	MFI_STAT_NOT_IN_ENCL = 0x24,
+	MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
+	MFI_STAT_PD_TYPE_WRONG = 0x26,
+	MFI_STAT_PR_DISABLED = 0x27,
+	MFI_STAT_ROW_INDEX_INVALID = 0x28,
+	MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
+	MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
+	MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
+	MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
+	MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
+	MFI_STAT_SCSI_IO_FAILED = 0x2e,
+	MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
+	MFI_STAT_SHUTDOWN_FAILED = 0x30,
+	MFI_STAT_TIME_NOT_SET = 0x31,
+	MFI_STAT_WRONG_STATE = 0x32,
+	MFI_STAT_LD_OFFLINE = 0x33,
+	MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
+	MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
+	MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
+	MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
+	MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+	MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
+
+	MFI_STAT_INVALID_STATUS = 0xFF
+};
+
+enum mfi_evt_class {
+	MFI_EVT_CLASS_DEBUG =		-2,
+	MFI_EVT_CLASS_PROGRESS =	-1,
+	MFI_EVT_CLASS_INFO =		0,
+	MFI_EVT_CLASS_WARNING =		1,
+	MFI_EVT_CLASS_CRITICAL =	2,
+	MFI_EVT_CLASS_FATAL =		3,
+	MFI_EVT_CLASS_DEAD =		4
+};
+
+/*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE  (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+	UNAVAILABLE = 0,
+	AVAILABLE = 1,
+	COPYING = 2,
+	COPIED = 3,
+	COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+	MR_CRASH_BUF_TURN_OFF = 0,
+	MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
+ * Number of mailbox bytes in DCMD message frame
+ */
+#define MFI_MBOX_SIZE				12
+
+enum MR_EVT_CLASS {
+
+	MR_EVT_CLASS_DEBUG = -2,
+	MR_EVT_CLASS_PROGRESS = -1,
+	MR_EVT_CLASS_INFO = 0,
+	MR_EVT_CLASS_WARNING = 1,
+	MR_EVT_CLASS_CRITICAL = 2,
+	MR_EVT_CLASS_FATAL = 3,
+	MR_EVT_CLASS_DEAD = 4,
+
+};
+
+enum MR_EVT_LOCALE {
+
+	MR_EVT_LOCALE_LD = 0x0001,
+	MR_EVT_LOCALE_PD = 0x0002,
+	MR_EVT_LOCALE_ENCL = 0x0004,
+	MR_EVT_LOCALE_BBU = 0x0008,
+	MR_EVT_LOCALE_SAS = 0x0010,
+	MR_EVT_LOCALE_CTRL = 0x0020,
+	MR_EVT_LOCALE_CONFIG = 0x0040,
+	MR_EVT_LOCALE_CLUSTER = 0x0080,
+	MR_EVT_LOCALE_ALL = 0xffff,
+
+};
+
+enum MR_EVT_ARGS {
+
+	MR_EVT_ARGS_NONE,
+	MR_EVT_ARGS_CDB_SENSE,
+	MR_EVT_ARGS_LD,
+	MR_EVT_ARGS_LD_COUNT,
+	MR_EVT_ARGS_LD_LBA,
+	MR_EVT_ARGS_LD_OWNER,
+	MR_EVT_ARGS_LD_LBA_PD_LBA,
+	MR_EVT_ARGS_LD_PROG,
+	MR_EVT_ARGS_LD_STATE,
+	MR_EVT_ARGS_LD_STRIP,
+	MR_EVT_ARGS_PD,
+	MR_EVT_ARGS_PD_ERR,
+	MR_EVT_ARGS_PD_LBA,
+	MR_EVT_ARGS_PD_LBA_LD,
+	MR_EVT_ARGS_PD_PROG,
+	MR_EVT_ARGS_PD_STATE,
+	MR_EVT_ARGS_PCI,
+	MR_EVT_ARGS_RATE,
+	MR_EVT_ARGS_STR,
+	MR_EVT_ARGS_TIME,
+	MR_EVT_ARGS_ECC,
+	MR_EVT_ARGS_LD_PROP,
+	MR_EVT_ARGS_PD_SPARE,
+	MR_EVT_ARGS_PD_INDEX,
+	MR_EVT_ARGS_DIAG_PASS,
+	MR_EVT_ARGS_DIAG_FAIL,
+	MR_EVT_ARGS_PD_LBA_LBA,
+	MR_EVT_ARGS_PORT_PHY,
+	MR_EVT_ARGS_PD_MISSING,
+	MR_EVT_ARGS_PD_ADDRESS,
+	MR_EVT_ARGS_BITMAP,
+	MR_EVT_ARGS_CONNECTOR,
+	MR_EVT_ARGS_PD_PD,
+	MR_EVT_ARGS_PD_FRU,
+	MR_EVT_ARGS_PD_PATHINFO,
+	MR_EVT_ARGS_PD_POWER_STATE,
+	MR_EVT_ARGS_GENERIC,
+};
+
+
+#define SGE_BUFFER_SIZE	4096
+#define MEGASAS_CLUSTER_ID_SIZE	16
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+	MR_PD_QUERY_TYPE_ALL                = 0,
+	MR_PD_QUERY_TYPE_STATE              = 1,
+	MR_PD_QUERY_TYPE_POWER_STATE        = 2,
+	MR_PD_QUERY_TYPE_MEDIA_TYPE         = 3,
+	MR_PD_QUERY_TYPE_SPEED              = 4,
+	MR_PD_QUERY_TYPE_EXPOSED_TO_HOST    = 5,
+};
+
+enum MR_LD_QUERY_TYPE {
+	MR_LD_QUERY_TYPE_ALL	         = 0,
+	MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+	MR_LD_QUERY_TYPE_USED_TGT_IDS    = 2,
+	MR_LD_QUERY_TYPE_CLUSTER_ACCESS  = 3,
+	MR_LD_QUERY_TYPE_CLUSTER_LOCALE  = 4,
+};
+
+
+#define MR_EVT_CFG_CLEARED                              0x0004
+#define MR_EVT_LD_STATE_CHANGE                          0x0051
+#define MR_EVT_PD_INSERTED                              0x005b
+#define MR_EVT_PD_REMOVED                               0x0070
+#define MR_EVT_LD_CREATED                               0x008a
+#define MR_EVT_LD_DELETED                               0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED                     0x00db
+#define MR_EVT_LD_OFFLINE                               0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED             0x0152
+#define MR_EVT_CTRL_PROP_CHANGED			0x012f
+
+enum MR_PD_STATE {
+	MR_PD_STATE_UNCONFIGURED_GOOD   = 0x00,
+	MR_PD_STATE_UNCONFIGURED_BAD    = 0x01,
+	MR_PD_STATE_HOT_SPARE           = 0x02,
+	MR_PD_STATE_OFFLINE             = 0x10,
+	MR_PD_STATE_FAILED              = 0x11,
+	MR_PD_STATE_REBUILD             = 0x14,
+	MR_PD_STATE_ONLINE              = 0x18,
+	MR_PD_STATE_COPYBACK            = 0x20,
+	MR_PD_STATE_SYSTEM              = 0x40,
+ };
+
+union MR_PD_REF {
+	struct {
+		u16	 deviceId;
+		u16	 seqNum;
+	} mrPdRef;
+	u32	 ref;
+};
+
+/*
+ * define the DDF Type bit structure
+ */
+union MR_PD_DDF_TYPE {
+	 struct {
+		union {
+			struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+				 u16	 forcedPDGUID:1;
+				 u16	 inVD:1;
+				 u16	 isGlobalSpare:1;
+				 u16	 isSpare:1;
+				 u16	 isForeign:1;
+				 u16	 reserved:7;
+				 u16	 intf:4;
+#else
+				 u16	 intf:4;
+				 u16	 reserved:7;
+				 u16	 isForeign:1;
+				 u16	 isSpare:1;
+				 u16	 isGlobalSpare:1;
+				 u16	 inVD:1;
+				 u16	 forcedPDGUID:1;
+#endif
+			 } pdType;
+			 u16	 type;
+		 };
+		 u16	 reserved;
+	 } ddf;
+	 struct {
+		 u32	reserved;
+	 } nonDisk;
+	 u32	 type;
+} __packed;
+
+/*
+ * defines the progress structure
+ */
+union MR_PROGRESS {
+	struct  {
+		u16 progress;
+		union {
+			u16 elapsedSecs;
+			u16 elapsedSecsForLastPercent;
+		};
+	} mrProgress;
+	u32 w;
+} __packed;
+
+/*
+ * defines the physical drive progress structure
+ */
+struct MR_PD_PROGRESS {
+	struct {
+#ifndef MFI_BIG_ENDIAN
+		u32     rbld:1;
+		u32     patrol:1;
+		u32     clear:1;
+		u32     copyBack:1;
+		u32     erase:1;
+		u32     locate:1;
+		u32     reserved:26;
+#else
+		u32     reserved:26;
+		u32     locate:1;
+		u32     erase:1;
+		u32     copyBack:1;
+		u32     clear:1;
+		u32     patrol:1;
+		u32     rbld:1;
+#endif
+	} active;
+	union MR_PROGRESS     rbld;
+	union MR_PROGRESS     patrol;
+	union {
+		union MR_PROGRESS     clear;
+		union MR_PROGRESS     erase;
+	};
+
+	struct {
+#ifndef MFI_BIG_ENDIAN
+		u32     rbld:1;
+		u32     patrol:1;
+		u32     clear:1;
+		u32     copyBack:1;
+		u32     erase:1;
+		u32     reserved:27;
+#else
+		u32     reserved:27;
+		u32     erase:1;
+		u32     copyBack:1;
+		u32     clear:1;
+		u32     patrol:1;
+		u32     rbld:1;
+#endif
+	} pause;
+
+	union MR_PROGRESS     reserved[3];
+} __packed;
+
+struct  MR_PD_INFO {
+	union MR_PD_REF	ref;
+	u8 inquiryData[96];
+	u8 vpdPage83[64];
+	u8 notSupported;
+	u8 scsiDevType;
+
+	union {
+		u8 connectedPortBitmap;
+		u8 connectedPortNumbers;
+	};
+
+	u8 deviceSpeed;
+	u32 mediaErrCount;
+	u32 otherErrCount;
+	u32 predFailCount;
+	u32 lastPredFailEventSeqNum;
+
+	u16 fwState;
+	u8 disabledForRemoval;
+	u8 linkSpeed;
+	union MR_PD_DDF_TYPE state;
+
+	struct {
+		u8 count;
+#ifndef __BIG_ENDIAN_BITFIELD
+		u8 isPathBroken:4;
+		u8 reserved3:3;
+		u8 widePortCapable:1;
+#else
+		u8 widePortCapable:1;
+		u8 reserved3:3;
+		u8 isPathBroken:4;
+#endif
+
+		u8 connectorIndex[2];
+		u8 reserved[4];
+		u64 sasAddr[2];
+		u8 reserved2[16];
+	} pathInfo;
+
+	u64 rawSize;
+	u64 nonCoercedSize;
+	u64 coercedSize;
+	u16 enclDeviceId;
+	u8 enclIndex;
+
+	union {
+		u8 slotNumber;
+		u8 enclConnectorIndex;
+	};
+
+	struct MR_PD_PROGRESS progInfo;
+	u8 badBlockTableFull;
+	u8 unusableInCurrentConfig;
+	u8 vpdPage83Ext[64];
+	u8 powerState;
+	u8 enclPosition;
+	u32 allowedOps;
+	u16 copyBackPartnerId;
+	u16 enclPartnerDeviceId;
+	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+		u16 fdeCapable:1;
+		u16 fdeEnabled:1;
+		u16 secured:1;
+		u16 locked:1;
+		u16 foreign:1;
+		u16 needsEKM:1;
+		u16 reserved:10;
+#else
+		u16 reserved:10;
+		u16 needsEKM:1;
+		u16 foreign:1;
+		u16 locked:1;
+		u16 secured:1;
+		u16 fdeEnabled:1;
+		u16 fdeCapable:1;
+#endif
+	} security;
+	u8 mediaType;
+	u8 notCertified;
+	u8 bridgeVendor[8];
+	u8 bridgeProductIdentification[16];
+	u8 bridgeProductRevisionLevel[4];
+	u8 satBridgeExists;
+
+	u8 interfaceType;
+	u8 temperature;
+	u8 emulatedBlockSize;
+	u16 userDataBlockSize;
+	u16 reserved2;
+
+	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+		u32 piType:3;
+		u32 piFormatted:1;
+		u32 piEligible:1;
+		u32 NCQ:1;
+		u32 WCE:1;
+		u32 commissionedSpare:1;
+		u32 emergencySpare:1;
+		u32 ineligibleForSSCD:1;
+		u32 ineligibleForLd:1;
+		u32 useSSEraseType:1;
+		u32 wceUnchanged:1;
+		u32 supportScsiUnmap:1;
+		u32 reserved:18;
+#else
+		u32 reserved:18;
+		u32 supportScsiUnmap:1;
+		u32 wceUnchanged:1;
+		u32 useSSEraseType:1;
+		u32 ineligibleForLd:1;
+		u32 ineligibleForSSCD:1;
+		u32 emergencySpare:1;
+		u32 commissionedSpare:1;
+		u32 WCE:1;
+		u32 NCQ:1;
+		u32 piEligible:1;
+		u32 piFormatted:1;
+		u32 piType:3;
+#endif
+	} properties;
+
+	u64 shieldDiagCompletionTime;
+	u8 shieldCounter;
+
+	u8 linkSpeedOther;
+	u8 reserved4[2];
+
+	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+		u32 bbmErrCountSupported:1;
+		u32 bbmErrCount:31;
+#else
+		u32 bbmErrCount:31;
+		u32 bbmErrCountSupported:1;
+#endif
+	} bbmErr;
+
+	u8 reserved1[512-428];
+} __packed;
+
+/*
+ * Definition of structure used to expose attributes of VD or JBOD
+ * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP
+ * is fired by driver)
+ */
+struct MR_TARGET_PROPERTIES {
+	u32    max_io_size_kb;
+	u32    device_qdepth;
+	u32    sector_size;
+	u8     reserved[500];
+} __packed;
+
+/*
+ * defines the physical drive address structure
+ */
+struct MR_PD_ADDRESS {
+	__le16	deviceId;
+	u16     enclDeviceId;
+
+	union {
+		struct {
+			u8  enclIndex;
+			u8  slotNumber;
+		} mrPdAddress;
+		struct {
+			u8  enclPosition;
+			u8  enclConnectorIndex;
+		} mrEnclAddress;
+	};
+	u8      scsiDevType;
+	union {
+		u8      connectedPortBitmap;
+		u8      connectedPortNumbers;
+	};
+	u64     sasAddr[2];
+} __packed;
+
+/*
+ * defines the physical drive list structure
+ */
+struct MR_PD_LIST {
+	__le32		size;
+	__le32		count;
+	struct MR_PD_ADDRESS   addr[1];
+} __packed;
+
+struct megasas_pd_list {
+	u16             tid;
+	u8             driveType;
+	u8             driveState;
+} __packed;
+
+/*
+ * defines the logical drive reference structure
+ */
+union  MR_LD_REF {
+	struct {
+		u8      targetId;
+		u8      reserved;
+		__le16     seqNum;
+	};
+	__le32     ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+	__le32     ldCount;
+	__le32     reserved;
+	struct {
+		union MR_LD_REF   ref;
+		u8          state;
+		u8          reserved[3];
+		__le64		size;
+	} ldList[MAX_LOGICAL_DRIVES_EXT];
+} __packed;
+
+struct MR_LD_TARGETID_LIST {
+	__le32	size;
+	__le32	count;
+	u8	pad[3];
+	u8	targetId[MAX_LOGICAL_DRIVES_EXT];
+};
+
+
+/*
+ * SAS controller properties
+ */
+struct megasas_ctrl_prop {
+
+	u16 seq_num;
+	u16 pred_fail_poll_interval;
+	u16 intr_throttle_count;
+	u16 intr_throttle_timeouts;
+	u8 rebuild_rate;
+	u8 patrol_read_rate;
+	u8 bgi_rate;
+	u8 cc_rate;
+	u8 recon_rate;
+	u8 cache_flush_interval;
+	u8 spinup_drv_count;
+	u8 spinup_delay;
+	u8 cluster_enable;
+	u8 coercion_mode;
+	u8 alarm_enable;
+	u8 disable_auto_rebuild;
+	u8 disable_battery_warn;
+	u8 ecc_bucket_size;
+	u16 ecc_bucket_leak_rate;
+	u8 restore_hotspare_on_insertion;
+	u8 expose_encl_devices;
+	u8 maintainPdFailHistory;
+	u8 disallowHostRequestReordering;
+	u8 abortCCOnError;
+	u8 loadBalanceMode;
+	u8 disableAutoDetectBackplane;
+
+	u8 snapVDSpace;
+
+	/*
+	 * Add properties that can be controlled by
+	 * a bit in the following structure.
+	 */
+	struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:18;
+		u32     enableJBOD:1;
+		u32     disableSpinDownHS:1;
+		u32     allowBootWithPinnedCache:1;
+		u32     disableOnlineCtrlReset:1;
+		u32     enableSecretKeyControl:1;
+		u32     autoEnhancedImport:1;
+		u32     enableSpinDownUnconfigured:1;
+		u32     SSDPatrolReadEnabled:1;
+		u32     SSDSMARTerEnabled:1;
+		u32     disableNCQ:1;
+		u32     useFdeOnly:1;
+		u32     prCorrectUnconfiguredAreas:1;
+		u32     SMARTerEnabled:1;
+		u32     copyBackDisabled:1;
+#else
+		u32     copyBackDisabled:1;
+		u32     SMARTerEnabled:1;
+		u32     prCorrectUnconfiguredAreas:1;
+		u32     useFdeOnly:1;
+		u32     disableNCQ:1;
+		u32     SSDSMARTerEnabled:1;
+		u32     SSDPatrolReadEnabled:1;
+		u32     enableSpinDownUnconfigured:1;
+		u32     autoEnhancedImport:1;
+		u32     enableSecretKeyControl:1;
+		u32     disableOnlineCtrlReset:1;
+		u32     allowBootWithPinnedCache:1;
+		u32     disableSpinDownHS:1;
+		u32     enableJBOD:1;
+		u32     reserved:18;
+#endif
+	} OnOffProperties;
+	u8 autoSnapVDSpace;
+	u8 viewSpace;
+	__le16 spinDownTime;
+	u8  reserved[24];
+} __packed;
+
+/*
+ * SAS controller information
+ */
+struct megasas_ctrl_info {
+
+	/*
+	 * PCI device information
+	 */
+	struct {
+
+		__le16 vendor_id;
+		__le16 device_id;
+		__le16 sub_vendor_id;
+		__le16 sub_device_id;
+		u8 reserved[24];
+
+	} __attribute__ ((packed)) pci;
+
+	/*
+	 * Host interface information
+	 */
+	struct {
+
+		u8 PCIX:1;
+		u8 PCIE:1;
+		u8 iSCSI:1;
+		u8 SAS_3G:1;
+		u8 SRIOV:1;
+		u8 reserved_0:3;
+		u8 reserved_1[6];
+		u8 port_count;
+		u64 port_addr[8];
+
+	} __attribute__ ((packed)) host_interface;
+
+	/*
+	 * Device (backend) interface information
+	 */
+	struct {
+
+		u8 SPI:1;
+		u8 SAS_3G:1;
+		u8 SATA_1_5G:1;
+		u8 SATA_3G:1;
+		u8 reserved_0:4;
+		u8 reserved_1[6];
+		u8 port_count;
+		u64 port_addr[8];
+
+	} __attribute__ ((packed)) device_interface;
+
+	/*
+	 * List of components residing in flash. All strings are null terminated
+	 */
+	__le32 image_check_word;
+	__le32 image_component_count;
+
+	struct {
+
+		char name[8];
+		char version[32];
+		char build_date[16];
+		char built_time[16];
+
+	} __attribute__ ((packed)) image_component[8];
+
+	/*
+	 * List of flash components that have been flashed on the card, but
+	 * are not in use, pending reset of the adapter. This list will be
+	 * empty if a flash operation has not occurred. All strings are null
+	 * terminated
+	 */
+	__le32 pending_image_component_count;
+
+	struct {
+
+		char name[8];
+		char version[32];
+		char build_date[16];
+		char build_time[16];
+
+	} __attribute__ ((packed)) pending_image_component[8];
+
+	u8 max_arms;
+	u8 max_spans;
+	u8 max_arrays;
+	u8 max_lds;
+
+	char product_name[80];
+	char serial_no[32];
+
+	/*
+	 * Other physical/controller/operation information. Indicates the
+	 * presence of the hardware
+	 */
+	struct {
+
+		u32 bbu:1;
+		u32 alarm:1;
+		u32 nvram:1;
+		u32 uart:1;
+		u32 reserved:28;
+
+	} __attribute__ ((packed)) hw_present;
+
+	__le32 current_fw_time;
+
+	/*
+	 * Maximum data transfer sizes
+	 */
+	__le16 max_concurrent_cmds;
+	__le16 max_sge_count;
+	__le32 max_request_size;
+
+	/*
+	 * Logical and physical device counts
+	 */
+	__le16 ld_present_count;
+	__le16 ld_degraded_count;
+	__le16 ld_offline_count;
+
+	__le16 pd_present_count;
+	__le16 pd_disk_present_count;
+	__le16 pd_disk_pred_failure_count;
+	__le16 pd_disk_failed_count;
+
+	/*
+	 * Memory size information
+	 */
+	__le16 nvram_size;
+	__le16 memory_size;
+	__le16 flash_size;
+
+	/*
+	 * Error counters
+	 */
+	__le16 mem_correctable_error_count;
+	__le16 mem_uncorrectable_error_count;
+
+	/*
+	 * Cluster information
+	 */
+	u8 cluster_permitted;
+	u8 cluster_active;
+
+	/*
+	 * Additional max data transfer sizes
+	 */
+	__le16 max_strips_per_io;
+
+	/*
+	 * Controller capabilities structures
+	 */
+	struct {
+
+		u32 raid_level_0:1;
+		u32 raid_level_1:1;
+		u32 raid_level_5:1;
+		u32 raid_level_1E:1;
+		u32 raid_level_6:1;
+		u32 reserved:27;
+
+	} __attribute__ ((packed)) raid_levels;
+
+	struct {
+
+		u32 rbld_rate:1;
+		u32 cc_rate:1;
+		u32 bgi_rate:1;
+		u32 recon_rate:1;
+		u32 patrol_rate:1;
+		u32 alarm_control:1;
+		u32 cluster_supported:1;
+		u32 bbu:1;
+		u32 spanning_allowed:1;
+		u32 dedicated_hotspares:1;
+		u32 revertible_hotspares:1;
+		u32 foreign_config_import:1;
+		u32 self_diagnostic:1;
+		u32 mixed_redundancy_arr:1;
+		u32 global_hot_spares:1;
+		u32 reserved:17;
+
+	} __attribute__ ((packed)) adapter_operations;
+
+	struct {
+
+		u32 read_policy:1;
+		u32 write_policy:1;
+		u32 io_policy:1;
+		u32 access_policy:1;
+		u32 disk_cache_policy:1;
+		u32 reserved:27;
+
+	} __attribute__ ((packed)) ld_operations;
+
+	struct {
+
+		u8 min;
+		u8 max;
+		u8 reserved[2];
+
+	} __attribute__ ((packed)) stripe_sz_ops;
+
+	struct {
+
+		u32 force_online:1;
+		u32 force_offline:1;
+		u32 force_rebuild:1;
+		u32 reserved:29;
+
+	} __attribute__ ((packed)) pd_operations;
+
+	struct {
+
+		u32 ctrl_supports_sas:1;
+		u32 ctrl_supports_sata:1;
+		u32 allow_mix_in_encl:1;
+		u32 allow_mix_in_ld:1;
+		u32 allow_sata_in_cluster:1;
+		u32 reserved:27;
+
+	} __attribute__ ((packed)) pd_mix_support;
+
+	/*
+	 * Define ECC single-bit-error bucket information
+	 */
+	u8 ecc_bucket_count;
+	u8 reserved_2[11];
+
+	/*
+	 * Include the controller properties (changeable items)
+	 */
+	struct megasas_ctrl_prop properties;
+
+	/*
+	 * Define FW pkg version (set in environment variables on an OEM basis)
+	 */
+	char package_version[0x60];
+
+
+	/*
+	 * If adapterOperations.supportMoreThan8Phys is set,
+	 * and deviceInterface.portCount is greater than 8,
+	 * SAS Addrs for first 8 ports shall be populated in
+	 * deviceInterface.portAddr, and the rest shall be
+	 * populated in deviceInterfacePortAddr2.
+	 */
+	__le64	    deviceInterfacePortAddr2[8]; /*6a0h */
+	u8          reserved3[128];              /*6e0h */
+
+	struct {                                /*760h */
+		u16 minPdRaidLevel_0:4;
+		u16 maxPdRaidLevel_0:12;
+
+		u16 minPdRaidLevel_1:4;
+		u16 maxPdRaidLevel_1:12;
+
+		u16 minPdRaidLevel_5:4;
+		u16 maxPdRaidLevel_5:12;
+
+		u16 minPdRaidLevel_1E:4;
+		u16 maxPdRaidLevel_1E:12;
+
+		u16 minPdRaidLevel_6:4;
+		u16 maxPdRaidLevel_6:12;
+
+		u16 minPdRaidLevel_10:4;
+		u16 maxPdRaidLevel_10:12;
+
+		u16 minPdRaidLevel_50:4;
+		u16 maxPdRaidLevel_50:12;
+
+		u16 minPdRaidLevel_60:4;
+		u16 maxPdRaidLevel_60:12;
+
+		u16 minPdRaidLevel_1E_RLQ0:4;
+		u16 maxPdRaidLevel_1E_RLQ0:12;
+
+		u16 minPdRaidLevel_1E0_RLQ0:4;
+		u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+		u16 reserved[6];
+	} pdsForRaidLevels;
+
+	__le16 maxPds;                          /*780h */
+	__le16 maxDedHSPs;                      /*782h */
+	__le16 maxGlobalHSP;                    /*784h */
+	__le16 ddfSize;                         /*786h */
+	u8  maxLdsPerArray;                     /*788h */
+	u8  partitionsInDDF;                    /*789h */
+	u8  lockKeyBinding;                     /*78ah */
+	u8  maxPITsPerLd;                       /*78bh */
+	u8  maxViewsPerLd;                      /*78ch */
+	u8  maxTargetId;                        /*78dh */
+	__le16 maxBvlVdSize;                    /*78eh */
+
+	__le16 maxConfigurableSSCSize;          /*790h */
+	__le16 currentSSCsize;                  /*792h */
+
+	char    expanderFwVersion[12];          /*794h */
+
+	__le16 PFKTrialTimeRemaining;           /*7A0h */
+
+	__le16 cacheMemorySize;                 /*7A2h */
+
+	struct {                                /*7A4h */
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:5;
+		u32	activePassive:2;
+		u32	supportConfigAutoBalance:1;
+		u32	mpio:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	supportPointInTimeProgress:1;
+		u32     supportUnevenSpans:1;
+		u32     dedicatedHotSparesLimited:1;
+		u32     headlessMode:1;
+		u32     supportEmulatedDrives:1;
+		u32     supportResetNow:1;
+		u32     realTimeScheduler:1;
+		u32     supportSSDPatrolRead:1;
+		u32     supportPerfTuning:1;
+		u32     disableOnlinePFKChange:1;
+		u32     supportJBOD:1;
+		u32     supportBootTimePFKChange:1;
+		u32     supportSetLinkSpeed:1;
+		u32     supportEmergencySpares:1;
+		u32     supportSuspendResumeBGops:1;
+		u32     blockSSDWriteCacheChange:1;
+		u32     supportShieldState:1;
+		u32     supportLdBBMInfo:1;
+		u32     supportLdPIType3:1;
+		u32     supportLdPIType2:1;
+		u32     supportLdPIType1:1;
+		u32     supportPIcontroller:1;
+#else
+		u32     supportPIcontroller:1;
+		u32     supportLdPIType1:1;
+		u32     supportLdPIType2:1;
+		u32     supportLdPIType3:1;
+		u32     supportLdBBMInfo:1;
+		u32     supportShieldState:1;
+		u32     blockSSDWriteCacheChange:1;
+		u32     supportSuspendResumeBGops:1;
+		u32     supportEmergencySpares:1;
+		u32     supportSetLinkSpeed:1;
+		u32     supportBootTimePFKChange:1;
+		u32     supportJBOD:1;
+		u32     disableOnlinePFKChange:1;
+		u32     supportPerfTuning:1;
+		u32     supportSSDPatrolRead:1;
+		u32     realTimeScheduler:1;
+
+		u32     supportResetNow:1;
+		u32     supportEmulatedDrives:1;
+		u32     headlessMode:1;
+		u32     dedicatedHotSparesLimited:1;
+
+
+		u32     supportUnevenSpans:1;
+		u32	supportPointInTimeProgress:1;
+		u32	supportDataLDonSSCArray:1;
+		u32	mpio:1;
+		u32	supportConfigAutoBalance:1;
+		u32	activePassive:2;
+		u32     reserved:5;
+#endif
+	} adapterOperations2;
+
+	u8  driverVersion[32];                  /*7A8h */
+	u8  maxDAPdCountSpinup60;               /*7C8h */
+	u8  temperatureROC;                     /*7C9h */
+	u8  temperatureCtrl;                    /*7CAh */
+	u8  reserved4;                          /*7CBh */
+	__le16 maxConfigurablePds;              /*7CCh */
+
+
+	u8  reserved5[2];                       /*0x7CDh */
+
+	/*
+	 * HA cluster information
+	 */
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:25;
+		u32     passive:1;
+		u32     premiumFeatureMismatch:1;
+		u32     ctrlPropIncompatible:1;
+		u32     fwVersionMismatch:1;
+		u32     hwIncompatible:1;
+		u32     peerIsIncompatible:1;
+		u32     peerIsPresent:1;
+#else
+		u32     peerIsPresent:1;
+		u32     peerIsIncompatible:1;
+		u32     hwIncompatible:1;
+		u32     fwVersionMismatch:1;
+		u32     ctrlPropIncompatible:1;
+		u32     premiumFeatureMismatch:1;
+		u32     passive:1;
+		u32     reserved:25;
+#endif
+	} cluster;
+
+	char clusterId[MEGASAS_CLUSTER_ID_SIZE]; /*0x7D4 */
+	struct {
+		u8  maxVFsSupported;            /*0x7E4*/
+		u8  numVFsEnabled;              /*0x7E5*/
+		u8  requestorId;                /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+		u8  reserved;                   /*0x7E7*/
+	} iov;
+
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:7;
+		u32     useSeqNumJbodFP:1;
+		u32     supportExtendedSSCSize:1;
+		u32     supportDiskCacheSettingForSysPDs:1;
+		u32     supportCPLDUpdate:1;
+		u32     supportTTYLogCompression:1;
+		u32     discardCacheDuringLDDelete:1;
+		u32     supportSecurityonJBOD:1;
+		u32     supportCacheBypassModes:1;
+		u32     supportDisableSESMonitoring:1;
+		u32     supportForceFlash:1;
+		u32     supportNVDRAM:1;
+		u32     supportDrvActivityLEDSetting:1;
+		u32     supportAllowedOpsforDrvRemoval:1;
+		u32     supportHOQRebuild:1;
+		u32     supportForceTo512e:1;
+		u32     supportNVCacheErase:1;
+		u32     supportDebugQueue:1;
+		u32     supportSwZone:1;
+		u32     supportCrashDump:1;
+		u32     supportMaxExtLDs:1;
+		u32     supportT10RebuildAssist:1;
+		u32     supportDisableImmediateIO:1;
+		u32     supportThermalPollInterval:1;
+		u32     supportPersonalityChange:2;
+#else
+		u32     supportPersonalityChange:2;
+		u32     supportThermalPollInterval:1;
+		u32     supportDisableImmediateIO:1;
+		u32     supportT10RebuildAssist:1;
+		u32	supportMaxExtLDs:1;
+		u32	supportCrashDump:1;
+		u32     supportSwZone:1;
+		u32     supportDebugQueue:1;
+		u32     supportNVCacheErase:1;
+		u32     supportForceTo512e:1;
+		u32     supportHOQRebuild:1;
+		u32     supportAllowedOpsforDrvRemoval:1;
+		u32     supportDrvActivityLEDSetting:1;
+		u32     supportNVDRAM:1;
+		u32     supportForceFlash:1;
+		u32     supportDisableSESMonitoring:1;
+		u32     supportCacheBypassModes:1;
+		u32     supportSecurityonJBOD:1;
+		u32     discardCacheDuringLDDelete:1;
+		u32     supportTTYLogCompression:1;
+		u32     supportCPLDUpdate:1;
+		u32     supportDiskCacheSettingForSysPDs:1;
+		u32     supportExtendedSSCSize:1;
+		u32     useSeqNumJbodFP:1;
+		u32     reserved:7;
+#endif
+	} adapterOperations3;
+
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u8 reserved:7;
+	/* Indicates whether the CPLD image is part of
+	 *  the package and stored in flash
+	 */
+	u8 cpld_in_flash:1;
+#else
+	u8 cpld_in_flash:1;
+	u8 reserved:7;
+#endif
+	u8 reserved1[3];
+	/* Null terminated string. Has the version
+	 *  information if cpld_in_flash = FALSE
+	 */
+	u8 userCodeDefinition[12];
+	} cpld;  /* Valid only if upgradableCPLD is TRUE */
+
+	struct {
+	#if defined(__BIG_ENDIAN_BITFIELD)
+		u16 reserved:8;
+		u16 fw_swaps_bbu_vpd_info:1;
+		u16 support_pd_map_target_id:1;
+		u16 support_ses_ctrl_in_multipathcfg:1;
+		u16 image_upload_supported:1;
+		u16 support_encrypted_mfc:1;
+		u16 supported_enc_algo:1;
+		u16 support_ibutton_less:1;
+		u16 ctrl_info_ext_supported:1;
+	#else
+
+		u16 ctrl_info_ext_supported:1;
+		u16 support_ibutton_less:1;
+		u16 supported_enc_algo:1;
+		u16 support_encrypted_mfc:1;
+		u16 image_upload_supported:1;
+		/* FW supports LUN based association and target port based */
+		u16 support_ses_ctrl_in_multipathcfg:1;
+		/* association for the SES device connected in multipath mode */
+		/* FW defines Jbod target Id within MR_PD_CFG_SEQ */
+		u16 support_pd_map_target_id:1;
+		/* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to
+		 *  provide the data in little endian order
+		 */
+		u16 fw_swaps_bbu_vpd_info:1;
+		u16 reserved:8;
+	#endif
+		} adapter_operations4;
+	u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
+} __packed;
+
+/*
+ * ===============================
+ * MegaRAID SAS driver definitions
+ * ===============================
+ */
+#define MEGASAS_MAX_PD_CHANNELS			2
+#define MEGASAS_MAX_LD_CHANNELS			2
+#define MEGASAS_MAX_CHANNELS			(MEGASAS_MAX_PD_CHANNELS + \
+						MEGASAS_MAX_LD_CHANNELS)
+#define MEGASAS_MAX_DEV_PER_CHANNEL		128
+#define MEGASAS_DEFAULT_INIT_ID			-1
+#define MEGASAS_MAX_LUN				8
+#define MEGASAS_DEFAULT_CMD_PER_LUN		256
+#define MEGASAS_MAX_PD                          (MEGASAS_MAX_PD_CHANNELS * \
+						MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
+						MEGASAS_MAX_DEV_PER_CHANNEL)
+
+#define MEGASAS_MAX_SECTORS                    (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE		(2*128)
+#define MEGASAS_DBG_LVL				1
+
+#define MEGASAS_FW_BUSY				1
+
+/* Driver's internal Logging levels*/
+#define OCR_LOGS    (1 << 0)
+
+#define SCAN_PD_CHANNEL	0x1
+#define SCAN_VD_CHANNEL	0x2
+
+#define MEGASAS_KDUMP_QUEUE_DEPTH               100
+#define MR_LARGE_IO_MIN_SIZE			(32 * 1024)
+#define MR_R1_LDIO_PIGGYBACK_DEFAULT		4
+
+enum MR_SCSI_CMD_TYPE {
+	READ_WRITE_LDIO = 0,
+	NON_READ_WRITE_LDIO = 1,
+	READ_WRITE_SYSPDIO = 2,
+	NON_READ_WRITE_SYSPDIO = 3,
+};
+
+enum DCMD_TIMEOUT_ACTION {
+	INITIATE_OCR = 0,
+	KILL_ADAPTER = 1,
+	IGNORE_TIMEOUT = 2,
+};
+
+enum FW_BOOT_CONTEXT {
+	PROBE_CONTEXT = 0,
+	OCR_CONTEXT = 1,
+};
+
+/* Frame Type */
+#define IO_FRAME				0
+#define PTHRU_FRAME				1
+
+/*
+ * When SCSI mid-layer calls driver's reset routine, driver waits for
+ * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
+ * that the driver cannot _actually_ abort or reset pending commands. While
+ * it is waiting for the commands to complete, it prints a diagnostic message
+ * every MEGASAS_RESET_NOTICE_INTERVAL seconds
+ */
+#define MEGASAS_RESET_WAIT_TIME			180
+#define MEGASAS_INTERNAL_CMD_WAIT_TIME		180
+#define	MEGASAS_RESET_NOTICE_INTERVAL		5
+#define MEGASAS_IOCTL_CMD			0
+#define MEGASAS_DEFAULT_CMD_TIMEOUT		90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH		16
+#define MEGASAS_BLOCKED_CMD_TIMEOUT		60
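+
+/*
+ * Illustrative sketch (not part of the driver): how the wait and notice
+ * constants above are typically consumed together.  Assumes the adapter
+ * soft state (struct megasas_instance, defined later in this header) with
+ * its atomic fw_outstanding counter.
+ */
+#if 0
+static int example_wait_for_outstanding(struct megasas_instance *instance)
+{
+	int i;
+
+	for (i = 0; i < MEGASAS_RESET_WAIT_TIME; i++) {
+		if (!atomic_read(&instance->fw_outstanding))
+			return 0;	/* all outstanding IO has completed */
+		/* diagnostic message every MEGASAS_RESET_NOTICE_INTERVAL secs */
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+			dev_notice(&instance->pdev->dev,
+				   "%d commands still outstanding\n",
+				   atomic_read(&instance->fw_outstanding));
+		msleep(1000);
+	}
+	return -ETIME;	/* commands did not complete within the wait time */
+}
+#endif
+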
+/*
+ * FW reports the maximum of number of commands that it can accept (maximum
+ * commands that can be outstanding) at any time. The driver must report a
+ * lower number to the mid layer because it can issue a few internal commands
+ * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs
+ * is shown below
+ */
+#define MEGASAS_INT_CMDS			32
+#define MEGASAS_SKINNY_INT_CMDS			5
+#define MEGASAS_FUSION_INTERNAL_CMDS		8
+#define MEGASAS_FUSION_IOCTL_CMDS		3
+#define MEGASAS_MFI_IOCTL_CMDS			27
+
+#define MEGASAS_MAX_MSIX_QUEUES			128
+/*
+ * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
+ * SGLs based on the size of dma_addr_t
+ */
+#define IS_DMA64				(sizeof(dma_addr_t) == 8)
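+
+/*
+ * Illustrative sketch (assumption, not the driver's exact builder logic):
+ * IS_DMA64 is what steers frame construction toward the megasas_sge64 or
+ * megasas_sge32 layouts defined later in this header; the real builders
+ * are megasas_make_sgl64()/megasas_make_sgl32() in megaraid_sas_base.c.
+ */
+#if 0
+	if (instance->flag_ieee)		/* IEEE/skinny SGE format */
+		sge_count = megasas_make_sgl_skinny(instance, scp, mfi_sgl);
+	else if (IS_DMA64)			/* 8-byte phys_addr SGEs */
+		sge_count = megasas_make_sgl64(instance, scp, mfi_sgl);
+	else					/* 4-byte phys_addr SGEs */
+		sge_count = megasas_make_sgl32(instance, scp, mfi_sgl);
+#endif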
+
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT		0x00000001
+
+#define MFI_INTR_FLAG_REPLY_MESSAGE			0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE		0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT	0x00000004
+
+#define MFI_OB_INTR_STATUS_MASK			0x00000002
+#define MFI_POLL_TIMEOUT_SECS			60
+#define MFI_IO_TIMEOUT_SECS			180
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF	(5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF		(1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF		300
+#define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK		(0x00000001 | 0x00000004)
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT	0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK	(0x00000001)
+
+#define MFI_1068_PCSR_OFFSET			0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET		0x64
+#define MFI_1068_FW_READY			0xDDDD0000
+
+#define MR_MAX_REPLY_QUEUES_OFFSET              0x0000001F
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET          0x003FC000
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
+#define MR_MAX_MSIX_REG_ARRAY                   16
+#define MR_RDPQ_MODE_OFFSET			0x00800000
+
+#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT	16
+#define MR_MAX_RAID_MAP_SIZE_MASK		0x1FF
+#define MR_MIN_MAP_SIZE				0x10000	/* 64k */
+
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET		0x01000000
+
+enum MR_ADAPTER_TYPE {
+	MFI_SERIES = 1,
+	THUNDERBOLT_SERIES = 2,
+	INVADER_SERIES = 3,
+	VENTURA_SERIES = 4,
+};
+
+/*
+ * Register set for both 1068 and 1078 controllers;
+ * structure extended for 1078 registers.
+ */
+
+struct megasas_register_set {
+	u32	doorbell;                       /*0000h*/
+	u32	fusion_seq_offset;		/*0004h*/
+	u32	fusion_host_diag;		/*0008h*/
+	u32	reserved_01;			/*000Ch*/
+
+	u32 	inbound_msg_0;			/*0010h*/
+	u32 	inbound_msg_1;			/*0014h*/
+	u32 	outbound_msg_0;			/*0018h*/
+	u32 	outbound_msg_1;			/*001Ch*/
+
+	u32 	inbound_doorbell;		/*0020h*/
+	u32 	inbound_intr_status;		/*0024h*/
+	u32 	inbound_intr_mask;		/*0028h*/
+
+	u32 	outbound_doorbell;		/*002Ch*/
+	u32 	outbound_intr_status;		/*0030h*/
+	u32 	outbound_intr_mask;		/*0034h*/
+
+	u32 	reserved_1[2];			/*0038h*/
+
+	u32 	inbound_queue_port;		/*0040h*/
+	u32 	outbound_queue_port;		/*0044h*/
+
+	u32	reserved_2[9];			/*0048h*/
+	u32	reply_post_host_index;		/*006Ch*/
+	u32	reserved_2_2[12];		/*0070h*/
+
+	u32 	outbound_doorbell_clear;	/*00A0h*/
+
+	u32 	reserved_3[3];			/*00A4h*/
+
+	u32 	outbound_scratch_pad;		/*00B0h*/
+	u32	outbound_scratch_pad_2;         /*00B4h*/
+	u32	outbound_scratch_pad_3;         /*00B8h*/
+	u32	outbound_scratch_pad_4;         /*00BCh*/
+
+	u32 	inbound_low_queue_port;		/*00C0h*/
+
+	u32 	inbound_high_queue_port;	/*00C4h*/
+
+	u32	inbound_single_queue_port;	/*00C8h*/
+	u32	res_6[11];			/*00CCh*/
+	u32	host_diag;			/*00F8h*/
+	u32	seq_offset;			/*00FCh*/
+	u32 	index_registers[807];		/*0100h*/
+} __attribute__ ((packed));
+
+struct megasas_sge32 {
+
+	__le32 phys_addr;
+	__le32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge64 {
+
+	__le64 phys_addr;
+	__le32 length;
+
+} __attribute__ ((packed));
+
+struct megasas_sge_skinny {
+	__le64 phys_addr;
+	__le32 length;
+	__le32 flag;
+} __packed;
+
+union megasas_sgl {
+
+	struct megasas_sge32 sge32[1];
+	struct megasas_sge64 sge64[1];
+	struct megasas_sge_skinny sge_skinny[1];
+
+} __attribute__ ((packed));
+
+struct megasas_header {
+
+	u8 cmd;			/*00h */
+	u8 sense_len;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 scsi_status;		/*03h */
+
+	u8 target_id;		/*04h */
+	u8 lun;			/*05h */
+	u8 cdb_len;		/*06h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+	__le32 data_xferlen;	/*14h */
+
+} __attribute__ ((packed));
+
+union megasas_sgl_frame {
+
+	struct megasas_sge32 sge32[8];
+	struct megasas_sge64 sge64[5];
+
+} __attribute__ ((packed));
+
+typedef union _MFI_CAPABILITIES {
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+	u32	reserved:19;
+	u32	support_pd_map_target_id:1;
+	u32	support_qd_throttling:1;
+	u32	support_fp_rlbypass:1;
+	u32	support_vfid_in_ioframe:1;
+	u32	support_ext_io_size:1;
+	u32	support_ext_queue_depth:1;
+	u32	security_protocol_cmds_fw:1;
+	u32	support_core_affinity:1;
+	u32	support_ndrive_r1_lb:1;
+	u32	support_max_255lds:1;
+	u32	support_fastpath_wb:1;
+	u32	support_additional_msix:1;
+	u32	support_fp_remote_lun:1;
+#else
+	u32	support_fp_remote_lun:1;
+	u32	support_additional_msix:1;
+	u32	support_fastpath_wb:1;
+	u32	support_max_255lds:1;
+	u32	support_ndrive_r1_lb:1;
+	u32	support_core_affinity:1;
+	u32	security_protocol_cmds_fw:1;
+	u32	support_ext_queue_depth:1;
+	u32	support_ext_io_size:1;
+	u32	support_vfid_in_ioframe:1;
+	u32	support_fp_rlbypass:1;
+	u32	support_qd_throttling:1;
+	u32	support_pd_map_target_id:1;
+	u32	reserved:19;
+#endif
+	} mfi_capabilities;
+	__le32		reg;
+} MFI_CAPABILITIES;
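+
+/*
+ * Illustrative sketch: MFI_CAPABILITIES puns a set of capability bits
+ * against a single 32-bit register.  A driver advertises capabilities by
+ * setting individual bits and then endian-swapping the aggregate 'reg'
+ * view once before posting the init frame (the field choices here are
+ * arbitrary examples).
+ */
+#if 0
+	MFI_CAPABILITIES *drv_ops = &init_frame->driver_operations;
+
+	drv_ops->mfi_capabilities.support_max_255lds = 1;
+	drv_ops->mfi_capabilities.support_qd_throttling = 1;
+	/* one swap covers all bits, thanks to the union's 'reg' view */
+	drv_ops->reg = cpu_to_le32(drv_ops->reg);
+#endif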
+
+struct megasas_init_frame {
+
+	u8 cmd;			/*00h */
+	u8 reserved_0;		/*01h */
+	u8 cmd_status;		/*02h */
+
+	u8 reserved_1;		/*03h */
+	MFI_CAPABILITIES driver_operations; /*04h*/
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 reserved_3;		/*12h */
+	__le32 data_xfer_len;	/*14h */
+
+	__le32 queue_info_new_phys_addr_lo;	/*18h */
+	__le32 queue_info_new_phys_addr_hi;	/*1Ch */
+	__le32 queue_info_old_phys_addr_lo;	/*20h */
+	__le32 queue_info_old_phys_addr_hi;	/*24h */
+	__le32 reserved_4[2];	/*28h */
+	__le32 system_info_lo;      /*30h */
+	__le32 system_info_hi;      /*34h */
+	__le32 reserved_5[2];	/*38h */
+
+} __attribute__ ((packed));
+
+struct megasas_init_queue_info {
+
+	__le32 init_flags;		/*00h */
+	__le32 reply_queue_entries;	/*04h */
+
+	__le32 reply_queue_start_phys_addr_lo;	/*08h */
+	__le32 reply_queue_start_phys_addr_hi;	/*0Ch */
+	__le32 producer_index_phys_addr_lo;	/*10h */
+	__le32 producer_index_phys_addr_hi;	/*14h */
+	__le32 consumer_index_phys_addr_lo;	/*18h */
+	__le32 consumer_index_phys_addr_hi;	/*1Ch */
+
+} __attribute__ ((packed));
+
+struct megasas_io_frame {
+
+	u8 cmd;			/*00h */
+	u8 sense_len;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 scsi_status;		/*03h */
+
+	u8 target_id;		/*04h */
+	u8 access_byte;		/*05h */
+	u8 reserved_0;		/*06h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+	__le32 lba_count;	/*14h */
+
+	__le32 sense_buf_phys_addr_lo;	/*18h */
+	__le32 sense_buf_phys_addr_hi;	/*1Ch */
+
+	__le32 start_lba_lo;	/*20h */
+	__le32 start_lba_hi;	/*24h */
+
+	union megasas_sgl sgl;	/*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_pthru_frame {
+
+	u8 cmd;			/*00h */
+	u8 sense_len;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 scsi_status;		/*03h */
+
+	u8 target_id;		/*04h */
+	u8 lun;			/*05h */
+	u8 cdb_len;		/*06h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+	__le32 data_xfer_len;	/*14h */
+
+	__le32 sense_buf_phys_addr_lo;	/*18h */
+	__le32 sense_buf_phys_addr_hi;	/*1Ch */
+
+	u8 cdb[16];		/*20h */
+	union megasas_sgl sgl;	/*30h */
+
+} __attribute__ ((packed));
+
+struct megasas_dcmd_frame {
+
+	u8 cmd;			/*00h */
+	u8 reserved_0;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 reserved_1[4];	/*03h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+
+	__le32 data_xfer_len;	/*14h */
+	__le32 opcode;		/*18h */
+
+	union {			/*1Ch */
+		u8 b[12];
+		__le16 s[6];
+		__le32 w[3];
+	} mbox;
+
+	union megasas_sgl sgl;	/*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_abort_frame {
+
+	u8 cmd;			/*00h */
+	u8 reserved_0;		/*01h */
+	u8 cmd_status;		/*02h */
+
+	u8 reserved_1;		/*03h */
+	__le32 reserved_2;	/*04h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 reserved_3;	/*12h */
+	__le32 reserved_4;	/*14h */
+
+	__le32 abort_context;	/*18h */
+	__le32 pad_1;		/*1Ch */
+
+	__le32 abort_mfi_phys_addr_lo;	/*20h */
+	__le32 abort_mfi_phys_addr_hi;	/*24h */
+
+	__le32 reserved_5[6];	/*28h */
+
+} __attribute__ ((packed));
+
+struct megasas_smp_frame {
+
+	u8 cmd;			/*00h */
+	u8 reserved_1;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 connection_status;	/*03h */
+
+	u8 reserved_2[3];	/*04h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+
+	__le32 data_xfer_len;	/*14h */
+	__le64 sas_addr;	/*18h */
+
+	union {
+		struct megasas_sge32 sge32[2];	/* [0]: resp [1]: req */
+		struct megasas_sge64 sge64[2];	/* [0]: resp [1]: req */
+	} sgl;
+
+} __attribute__ ((packed));
+
+struct megasas_stp_frame {
+
+	u8 cmd;			/*00h */
+	u8 reserved_1;		/*01h */
+	u8 cmd_status;		/*02h */
+	u8 reserved_2;		/*03h */
+
+	u8 target_id;		/*04h */
+	u8 reserved_3[2];	/*05h */
+	u8 sge_count;		/*07h */
+
+	__le32 context;		/*08h */
+	__le32 pad_0;		/*0Ch */
+
+	__le16 flags;		/*10h */
+	__le16 timeout;		/*12h */
+
+	__le32 data_xfer_len;	/*14h */
+
+	__le16 fis[10];		/*18h */
+	__le32 stp_flags;
+
+	union {
+		struct megasas_sge32 sge32[2];	/* [0]: resp [1]: data */
+		struct megasas_sge64 sge64[2];	/* [0]: resp [1]: data */
+	} sgl;
+
+} __attribute__ ((packed));
+
+union megasas_frame {
+
+	struct megasas_header hdr;
+	struct megasas_init_frame init;
+	struct megasas_io_frame io;
+	struct megasas_pthru_frame pthru;
+	struct megasas_dcmd_frame dcmd;
+	struct megasas_abort_frame abort;
+	struct megasas_smp_frame smp;
+	struct megasas_stp_frame stp;
+
+	u8 raw_bytes[64];
+};
+
+/**
+ * struct MR_PRIV_DEVICE - sdev private hostdata
+ * @is_tm_capable: firmware managed tm_capable flag
+ * @tm_busy: TM request is in progress
+ * @r1_ldio_hint: per-device budget for piggybacked RAID1 fast-path write IOs
+ * @interface_type: PD interface type (see enum MR_PD_TYPE)
+ */
+struct MR_PRIV_DEVICE {
+	bool is_tm_capable;
+	bool tm_busy;
+	atomic_t r1_ldio_hint;
+	u8   interface_type;
+};
+struct megasas_cmd;
+
+union megasas_evt_class_locale {
+
+	struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+		u16 locale;
+		u8 reserved;
+		s8 class;
+#else
+		s8 class;
+		u8 reserved;
+		u16 locale;
+#endif
+	} __attribute__ ((packed)) members;
+
+	u32 word;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_log_info {
+	__le32 newest_seq_num;
+	__le32 oldest_seq_num;
+	__le32 clear_seq_num;
+	__le32 shutdown_seq_num;
+	__le32 boot_seq_num;
+
+} __attribute__ ((packed));
+
+struct megasas_progress {
+
+	__le16 progress;
+	__le16 elapsed_seconds;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_ld {
+
+	u16 target_id;
+	u8 ld_index;
+	u8 reserved;
+
+} __attribute__ ((packed));
+
+struct megasas_evtarg_pd {
+	u16 device_id;
+	u8 encl_index;
+	u8 slot_number;
+
+} __attribute__ ((packed));
+
+struct megasas_evt_detail {
+
+	__le32 seq_num;
+	__le32 time_stamp;
+	__le32 code;
+	union megasas_evt_class_locale cl;
+	u8 arg_type;
+	u8 reserved1[15];
+
+	union {
+		struct {
+			struct megasas_evtarg_pd pd;
+			u8 cdb_length;
+			u8 sense_length;
+			u8 reserved[2];
+			u8 cdb[16];
+			u8 sense[64];
+		} __attribute__ ((packed)) cdbSense;
+
+		struct megasas_evtarg_ld ld;
+
+		struct {
+			struct megasas_evtarg_ld ld;
+			__le64 count;
+		} __attribute__ ((packed)) ld_count;
+
+		struct {
+			__le64 lba;
+			struct megasas_evtarg_ld ld;
+		} __attribute__ ((packed)) ld_lba;
+
+		struct {
+			struct megasas_evtarg_ld ld;
+			__le32 prevOwner;
+			__le32 newOwner;
+		} __attribute__ ((packed)) ld_owner;
+
+		struct {
+			u64 ld_lba;
+			u64 pd_lba;
+			struct megasas_evtarg_ld ld;
+			struct megasas_evtarg_pd pd;
+		} __attribute__ ((packed)) ld_lba_pd_lba;
+
+		struct {
+			struct megasas_evtarg_ld ld;
+			struct megasas_progress prog;
+		} __attribute__ ((packed)) ld_prog;
+
+		struct {
+			struct megasas_evtarg_ld ld;
+			u32 prev_state;
+			u32 new_state;
+		} __attribute__ ((packed)) ld_state;
+
+		struct {
+			u64 strip;
+			struct megasas_evtarg_ld ld;
+		} __attribute__ ((packed)) ld_strip;
+
+		struct megasas_evtarg_pd pd;
+
+		struct {
+			struct megasas_evtarg_pd pd;
+			u32 err;
+		} __attribute__ ((packed)) pd_err;
+
+		struct {
+			u64 lba;
+			struct megasas_evtarg_pd pd;
+		} __attribute__ ((packed)) pd_lba;
+
+		struct {
+			u64 lba;
+			struct megasas_evtarg_pd pd;
+			struct megasas_evtarg_ld ld;
+		} __attribute__ ((packed)) pd_lba_ld;
+
+		struct {
+			struct megasas_evtarg_pd pd;
+			struct megasas_progress prog;
+		} __attribute__ ((packed)) pd_prog;
+
+		struct {
+			struct megasas_evtarg_pd pd;
+			u32 prevState;
+			u32 newState;
+		} __attribute__ ((packed)) pd_state;
+
+		struct {
+			u16 vendorId;
+			__le16 deviceId;
+			u16 subVendorId;
+			u16 subDeviceId;
+		} __attribute__ ((packed)) pci;
+
+		u32 rate;
+		char str[96];
+
+		struct {
+			u32 rtc;
+			u32 elapsedSeconds;
+		} __attribute__ ((packed)) time;
+
+		struct {
+			u32 ecar;
+			u32 elog;
+			char str[64];
+		} __attribute__ ((packed)) ecc;
+
+		u8 b[96];
+		__le16 s[48];
+		__le32 w[24];
+		__le64 d[12];
+	} args;
+
+	char description[128];
+
+} __attribute__ ((packed));
+
+struct megasas_aen_event {
+	struct delayed_work hotplug_work;
+	struct megasas_instance *instance;
+};
+
+struct megasas_irq_context {
+	struct megasas_instance *instance;
+	u32 MSIxIndex;
+};
+
+struct MR_DRV_SYSTEM_INFO {
+	u8	infoVersion;
+	u8	systemIdLength;
+	u16	reserved0;
+	u8	systemId[64];
+	u8	reserved[1980];
+};
+
+enum MR_PD_TYPE {
+	UNKNOWN_DRIVE = 0,
+	PARALLEL_SCSI = 1,
+	SAS_PD = 2,
+	SATA_PD = 3,
+	FC_PD = 4,
+	NVME_PD = 5,
+};
+
+/* JBOD Queue depth definitions */
+#define MEGASAS_SATA_QD	32
+#define MEGASAS_SAS_QD	64
+#define MEGASAS_DEFAULT_PD_QD	64
+#define MEGASAS_NVME_QD		32
+
+#define MR_DEFAULT_NVME_PAGE_SIZE	4096
+#define MR_DEFAULT_NVME_PAGE_SHIFT	12
+#define MR_DEFAULT_NVME_MDTS_KB		128
+#define MR_NVME_PAGE_SIZE_MASK		0x000000FF
+
+struct megasas_instance {
+
+	unsigned int *reply_map;
+	__le32 *producer;
+	dma_addr_t producer_h;
+	__le32 *consumer;
+	dma_addr_t consumer_h;
+	struct MR_DRV_SYSTEM_INFO *system_info_buf;
+	dma_addr_t system_info_h;
+	struct MR_LD_VF_AFFILIATION *vf_affiliation;
+	dma_addr_t vf_affiliation_h;
+	struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+	dma_addr_t vf_affiliation_111_h;
+	struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+	dma_addr_t hb_host_mem_h;
+	struct MR_PD_INFO *pd_info;
+	dma_addr_t pd_info_h;
+	struct MR_TARGET_PROPERTIES *tgt_prop;
+	dma_addr_t tgt_prop_h;
+
+	__le32 *reply_queue;
+	dma_addr_t reply_queue_h;
+
+	u32 *crash_dump_buf;
+	dma_addr_t crash_dump_h;
+	void *crash_buf[MAX_CRASH_DUMP_SIZE];
+	unsigned int    fw_crash_buffer_size;
+	unsigned int    fw_crash_state;
+	unsigned int    fw_crash_buffer_offset;
+	u32 drv_buf_index;
+	u32 drv_buf_alloc;
+	u32 crash_dump_fw_support;
+	u32 crash_dump_drv_support;
+	u32 crash_dump_app_support;
+	u32 secure_jbod_support;
+	u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
+	bool use_seqnum_jbod_fp;   /* Added for PD sequence */
+	spinlock_t crashdump_lock;
+
+	struct megasas_register_set __iomem *reg_set;
+	u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
+	struct megasas_pd_list          local_pd_list[MEGASAS_MAX_PD];
+	u8 ld_ids[MEGASAS_MAX_LD_IDS];
+	s8 init_id;
+
+	u16 max_num_sge;
+	u16 max_fw_cmds;
+	u16 max_mpt_cmds;
+	u16 max_mfi_cmds;
+	u16 max_scsi_cmds;
+	u16 ldio_threshold;
+	u16 cur_can_queue;
+	u32 max_sectors_per_req;
+	struct megasas_aen_event *ev;
+
+	struct megasas_cmd **cmd_list;
+	struct list_head cmd_pool;
+	/* protects the MFI command free pool */
+	spinlock_t mfi_pool_lock;
+	/* used to sync firing the cmd to fw */
+	spinlock_t hba_lock;
+	/* used to sync producer, consumer ptrs in dpc */
+	spinlock_t stream_lock;
+	spinlock_t completion_lock;
+	struct dma_pool *frame_dma_pool;
+	struct dma_pool *sense_dma_pool;
+
+	struct megasas_evt_detail *evt_detail;
+	dma_addr_t evt_detail_h;
+	struct megasas_cmd *aen_cmd;
+	struct mutex hba_mutex;
+	struct semaphore ioctl_sem;
+
+	struct Scsi_Host *host;
+
+	wait_queue_head_t int_cmd_wait_q;
+	wait_queue_head_t abort_cmd_wait_q;
+
+	struct pci_dev *pdev;
+	u32 unique_id;
+	u32 fw_support_ieee;
+
+	atomic_t fw_outstanding;
+	atomic_t ldio_outstanding;
+	atomic_t fw_reset_no_pci_access;
+	atomic_t ieee_sgl;
+	atomic_t prp_sgl;
+	atomic_t sge_holes_type1;
+	atomic_t sge_holes_type2;
+	atomic_t sge_holes_type3;
+
+	struct megasas_instance_template *instancet;
+	struct tasklet_struct isr_tasklet;
+	struct work_struct work_init;
+	struct work_struct crash_init;
+
+	u8 flag;
+	u8 unload;
+	u8 flag_ieee;
+	u8 issuepend_done;
+	u8 disableOnlineCtrlReset;
+	u8 UnevenSpanSupport;
+
+	u8 supportmax256vd;
+	u8 pd_list_not_supported;
+	u16 fw_supported_vd_count;
+	u16 fw_supported_pd_count;
+
+	u16 drv_supported_vd_count;
+	u16 drv_supported_pd_count;
+
+	atomic_t adprecovery;
+	unsigned long last_time;
+	u32 mfiStatus;
+	u32 last_seq_num;
+
+	struct list_head internal_reset_pending_q;
+
+	/* Ptr to hba specific information */
+	void *ctrl_context;
+	u32 ctrl_context_pages;
+	struct megasas_ctrl_info *ctrl_info;
+	unsigned int msix_vectors;
+	struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
+	u64 map_id;
+	u64 pd_seq_map_id;
+	struct megasas_cmd *map_update_cmd;
+	struct megasas_cmd *jbod_seq_cmd;
+	unsigned long bar;
+	long reset_flags;
+	struct mutex reset_mutex;
+	struct timer_list sriov_heartbeat_timer;
+	char skip_heartbeat_timer_del;
+	u8 requestorId;
+	char PlasmaFW111;
+	char clusterId[MEGASAS_CLUSTER_ID_SIZE];
+	u8 peerIsPresent;
+	u8 passive;
+	u16 throttlequeuedepth;
+	u8 mask_interrupts;
+	u16 max_chain_frame_sz;
+	u8 is_imr;
+	u8 is_rdpq;
+	bool dev_handle;
+	bool fw_sync_cache_support;
+	u32 mfi_frame_size;
+	bool msix_combined;
+	u16 max_raid_mapsize;
+	/* preferred count to send as LDIO irrespective of FP capable */
+	u8  r1_ldio_hint_default;
+	u32 nvme_page_size;
+	u8 adapter_type;
+};
+struct MR_LD_VF_MAP {
+	u32 size;
+	union MR_LD_REF ref;
+	u8 ldVfCount;
+	u8 reserved[6];
+	u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+	u32 size;
+	u8 ldCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[9];
+	struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
+
+struct IOV_111 {
+	u8 maxVFsSupported;
+	u8 numVFsEnabled;
+	u8 requestorId;
+	u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+	u8 targetId;
+	u8 reserved[3];
+	u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+	u8 vdCount;
+	u8 vfCount;
+	u8 thisVf;
+	u8 reserved[5];
+	struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+	struct {
+		u32 fwCounter;	/* Firmware heart beat counter */
+		struct {
+			u32 debugmode:1; /* 1 = firmware is in debug mode;
+					  * heart beat will not be updated
+					  */
+			u32 reserved:31;
+		} debug;
+		u32 reserved_fw[6];
+		u32 driverCounter; /* Driver heart beat counter.  0x20 */
+		u32 reserved_driver[7];
+	} HB;
+	u8 pad[0x400-0x40];
+};
+
+enum {
+	MEGASAS_HBA_OPERATIONAL			= 0,
+	MEGASAS_ADPRESET_SM_INFAULT		= 1,
+	MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS	= 2,
+	MEGASAS_ADPRESET_SM_OPERATIONAL		= 3,
+	MEGASAS_HW_CRITICAL_ERROR		= 4,
+	MEGASAS_ADPRESET_SM_POLLING		= 5,
+	MEGASAS_ADPRESET_INPROG_SIGN		= 0xDEADDEAD,
+};
+
+struct megasas_instance_template {
+	void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \
+		u32, struct megasas_register_set __iomem *);
+
+	void (*enable_intr)(struct megasas_instance *);
+	void (*disable_intr)(struct megasas_instance *);
+
+	int (*clear_intr)(struct megasas_register_set __iomem *);
+
+	u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+	int (*adp_reset)(struct megasas_instance *, \
+		struct megasas_register_set __iomem *);
+	int (*check_reset)(struct megasas_instance *, \
+		struct megasas_register_set __iomem *);
+	irqreturn_t (*service_isr)(int irq, void *devp);
+	void (*tasklet)(unsigned long);
+	u32 (*init_adapter)(struct megasas_instance *);
+	u32 (*build_and_issue_cmd) (struct megasas_instance *,
+				    struct scsi_cmnd *);
+	void (*issue_dcmd)(struct megasas_instance *instance,
+			    struct megasas_cmd *cmd);
+};
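+
+/*
+ * Illustrative sketch (assumption): probe-time code binds one of the
+ * megasas_instance_template_* objects from megaraid_sas_base.c according
+ * to the PCI device id, and every hot path then dispatches through
+ * instance->instancet.
+ */
+#if 0
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_SAS1078R:
+	case PCI_DEVICE_ID_LSI_SAS1078DE:
+		instance->instancet = &megasas_instance_template_ppc;
+		break;
+	case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+	case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+		instance->instancet = &megasas_instance_template_gen2;
+		break;
+	default:	/* e.g. xscale IOP: 1064R, PERC5 */
+		instance->instancet = &megasas_instance_template_xscale;
+		break;
+	}
+	/* all later calls are indirect, e.g.: */
+	instance->instancet->enable_intr(instance);
+#endif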
+
+#define MEGASAS_IS_LOGICAL(sdev)					\
+	((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+
+#define MEGASAS_DEV_INDEX(scp)						\
+	(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +	\
+	scp->device->id)
+
+#define MEGASAS_PD_INDEX(scp)						\
+	((scp->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +		\
+	scp->device->id)
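+
+/*
+ * Worked example: with 2 PD channels of 128 devices each, a system PD at
+ * channel 1 / id 5 yields MEGASAS_PD_INDEX() == 1 * 128 + 5 == 133, while
+ * a logical device at channel 2 / id 5 yields
+ * MEGASAS_DEV_INDEX() == (2 % 2) * 128 + 5 == 5 in the LD id space.
+ */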
+
+struct megasas_cmd {
+
+	union megasas_frame *frame;
+	dma_addr_t frame_phys_addr;
+	u8 *sense;
+	dma_addr_t sense_phys_addr;
+
+	u32 index;
+	u8 sync_cmd;
+	u8 cmd_status_drv;
+	u8 abort_aen;
+	u8 retry_for_fw_reset;
+
+
+	struct list_head list;
+	struct scsi_cmnd *scmd;
+	u8 flags;
+
+	struct megasas_instance *instance;
+	union {
+		struct {
+			u16 smid;
+			u16 resvd;
+		} context;
+		u32 frame_count;
+	};
+};
+
+#define MAX_MGMT_ADAPTERS		1024
+#define MAX_IOCTL_SGE			16
+
+struct megasas_iocpacket {
+
+	u16 host_no;
+	u16 __pad1;
+	u32 sgl_off;
+	u32 sge_count;
+	u32 sense_off;
+	u32 sense_len;
+	union {
+		u8 raw[128];
+		struct megasas_header hdr;
+	} frame;
+
+	struct iovec sgl[MAX_IOCTL_SGE];
+
+} __attribute__ ((packed));
+
+struct megasas_aen {
+	u16 host_no;
+	u16 __pad1;
+	u32 seq_num;
+	u32 class_locale_word;
+} __attribute__ ((packed));
+
+#ifdef CONFIG_COMPAT
+struct compat_megasas_iocpacket {
+	u16 host_no;
+	u16 __pad1;
+	u32 sgl_off;
+	u32 sge_count;
+	u32 sense_off;
+	u32 sense_len;
+	union {
+		u8 raw[128];
+		struct megasas_header hdr;
+	} frame;
+	struct compat_iovec sgl[MAX_IOCTL_SGE];
+} __attribute__ ((packed));
+
+#define MEGASAS_IOC_FIRMWARE32	_IOWR('M', 1, struct compat_megasas_iocpacket)
+#endif
+
+#define MEGASAS_IOC_FIRMWARE	_IOWR('M', 1, struct megasas_iocpacket)
+#define MEGASAS_IOC_GET_AEN	_IOW('M', 3, struct megasas_aen)
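+
+/*
+ * Illustrative user-space sketch (assumptions: the management node name
+ * varies by distro, /dev/megaraid_sas_ioctl_node being the conventional
+ * one, and 0xFFFFFFFF requesting all event classes/locales).
+ */
+#if 0
+	struct megasas_aen aen = {
+		.host_no = 0,			/* first adapter */
+		.seq_num = 0,			/* start from the oldest event */
+		.class_locale_word = 0xFFFFFFFF,
+	};
+	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
+
+	if (fd >= 0 && ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) < 0)
+		perror("MEGASAS_IOC_GET_AEN");
+#endif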
+
+struct megasas_mgmt_info {
+
+	u16 count;
+	struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
+	int max_index;
+};
+
+enum MEGASAS_OCR_CAUSE {
+	FW_FAULT_OCR			= 0,
+	SCSIIO_TIMEOUT_OCR		= 1,
+	MFI_IO_TIMEOUT_OCR		= 2,
+};
+
+enum DCMD_RETURN_STATUS {
+	DCMD_SUCCESS		= 0,
+	DCMD_TIMEOUT		= 1,
+	DCMD_FAILED		= 2,
+	DCMD_NOT_FIRED		= 3,
+};
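+
+/*
+ * Illustrative sketch (assumption): a typical caller maps the blocking
+ * command status back to one of the DCMD_TIMEOUT_ACTION escalations
+ * declared earlier in this header.
+ */
+#if 0
+	switch (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)) {
+	case DCMD_SUCCESS:
+		break;				/* cmd_status was MFI_STAT_OK */
+	case DCMD_TIMEOUT:
+		/* escalate: INITIATE_OCR, KILL_ADAPTER or IGNORE_TIMEOUT */
+		break;
+	default:				/* DCMD_FAILED / DCMD_NOT_FIRED */
+		break;
+	}
+#endif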
+
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+		    struct IO_REQUEST_INFO *io_info,
+		    struct RAID_CONTEXT *pRAID_Context,
+		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+__le16 get_updated_dev_handle(struct megasas_instance *instance,
+			      struct LD_LOAD_BALANCE_INFO *lbInfo,
+			      struct IO_REQUEST_INFO *in_info,
+			      struct MR_DRV_RAID_MAP_ALL *drv_map);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance);
+/* PD sequence */
+int
+megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+	u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+	struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+	struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+	struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+	struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
+int megasas_cmd_type(struct scsi_cmnd *cmd);
+void megasas_setup_jbod_map(struct megasas_instance *instance);
+
+void megasas_update_sdev_properties(struct scsi_device *sdev);
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
+u32 mega_mod64(u64 dividend, u32 divisor);
+int megasas_alloc_fusion_context(struct megasas_instance *instance);
+void megasas_free_fusion_context(struct megasas_instance *instance);
+#endif				/*LSI_MEGARAID_SAS_H */
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_base.c b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_base.c
new file mode 100644
index 0000000..42d8760
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -0,0 +1,7768 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2003-2013  LSI Corporation
+ *  Copyright (c) 2013-2014  Avago Technologies
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  Authors: Avago Technologies
+ *           Sreenivas Bagalkote
+ *           Sumant Patro
+ *           Bo Yang
+ *           Adam Radford
+ *           Kashyap Desai <kashyap.desai@avagotech.com>
+ *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ *  San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if user does not provide
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+	"Maximum number of sectors per IO command");
+
+static int msix_disable;
+module_param(msix_disable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
+
+static unsigned int msix_vectors;
+module_param(msix_vectors, int, S_IRUGO);
+MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
+static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout before resetting adapter. Default: 180");
+
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
+
+int rdpq_enable = 1;
+module_param(rdpq_enable, int, S_IRUGO);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
+
+unsigned int dual_qdepth_disable;
+module_param(dual_qdepth_disable, int, S_IRUGO);
+MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
+
+unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+module_param(scmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
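+
+/*
+ * Example (illustrative): all of the above can be set at load time, e.g.
+ * "modprobe megaraid_sas msix_disable=1 scmd_timeout=60", and read back
+ * afterwards under /sys/module/megaraid_sas/parameters/.
+ */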
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
+MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+				 u8 query_type);
+static int megasas_issue_init_mfi(struct megasas_instance *instance);
+static int megasas_register_aen(struct megasas_instance *instance,
+				u32 seq_num, u32 class_locale_word);
+static void megasas_get_pd_info(struct megasas_instance *instance,
+				struct scsi_device *sdev);
+static int megasas_get_target_prop(struct megasas_instance *instance,
+				   struct scsi_device *sdev);
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
+	/* xscale IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+	/* ppc IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+	/* ppc IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+	/* gen2*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+	/* gen2*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+	/* skinny*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+	/* skinny*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+	/* xscale IOP, vega */
+	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+	/* xscale IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
+	/* Fusion */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+	/* Plasma */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
+	/* Invader */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
+	/* Fury */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
+	/* Intruder */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
+	/* Intruder 24 port*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
+	/* VENTURA */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
+
+static int megasas_mgmt_majorno;
+struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DEFINE_MUTEX(megasas_async_queue_mutex);
+
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
+u32 megasas_dbg_lvl;
+static u32 support_device_change;
+
+/* define lock for aen poll */
+spinlock_t poll_aen_lock;
+
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+		     u8 alt_status);
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+		       struct megasas_register_set __iomem *reg_set);
+static irqreturn_t megasas_isr(int irq, void *devp);
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance);
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+			    struct scsi_cmnd *scmd);
+static void megasas_complete_cmd_dpc(unsigned long instance_addr);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+					 int initial);
+
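+/**
+ * megasas_issue_dcmd -	Issues a DCMD frame to FW via the instance template
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be issued
+ */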
+void
+megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	instance->instancet->fire_cmd(instance,
+		cmd->frame_phys_addr, 0, instance->reg_set);
+}
+
+/**
+ * megasas_get_cmd -	Get a command from the free pool
+ * @instance:		Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance)
+{
+	unsigned long flags;
+	struct megasas_cmd *cmd = NULL;
+
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+
+	if (!list_empty(&instance->cmd_pool)) {
+		cmd = list_first_entry(&instance->cmd_pool,
+				       struct megasas_cmd, list);
+		list_del_init(&cmd->list);
+	} else {
+		dev_err(&instance->pdev->dev, "Command pool empty!\n");
+	}
+
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+	return cmd;
+}
+
+/**
+ * megasas_return_cmd -	Return a cmd to free command pool
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be returned to free command pool
+ */
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	unsigned long flags;
+	u32 blk_tags;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	/*
+	 * This flag is used only for fusion adapters:
+	 * wait for interrupt for polled mode DCMD.
+	 */
+	if (cmd->flags & DRV_DCMD_POLLED_MODE)
+		return;
+
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+
+	if (fusion) {
+		blk_tags = instance->max_scsi_cmds + cmd->index;
+		cmd_fusion = fusion->cmd_list[blk_tags];
+		megasas_return_cmd_fusion(instance, cmd_fusion);
+	}
+	cmd->scmd = NULL;
+	cmd->frame_count = 0;
+	cmd->flags = 0;
+	memset(cmd->frame, 0, instance->mfi_frame_size);
+	cmd->frame->io.context = cpu_to_le32(cmd->index);
+	if (!fusion && reset_devices)
+		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+	list_add(&cmd->list, (&instance->cmd_pool)->next);
+
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
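+/*
+ * Note: the two format helpers below each return a pointer to a
+ * function-local static buffer, so the result must be consumed
+ * immediately (here, within a single dev_info() call) and the helpers
+ * are not safe for concurrent callers.
+ */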
+static const char *
+format_timestamp(uint32_t timestamp)
+{
+	static char buffer[32];
+
+	if ((timestamp & 0xff000000) == 0xff000000)
+		snprintf(buffer, sizeof(buffer), "boot + %us",
+			 timestamp & 0x00ffffff);
+	else
+		snprintf(buffer, sizeof(buffer), "%us", timestamp);
+	return buffer;
+}
+
+static const char *
+format_class(int8_t class)
+{
+	static char buffer[6];
+
+	switch (class) {
+	case MFI_EVT_CLASS_DEBUG:
+		return "debug";
+	case MFI_EVT_CLASS_PROGRESS:
+		return "progress";
+	case MFI_EVT_CLASS_INFO:
+		return "info";
+	case MFI_EVT_CLASS_WARNING:
+		return "WARN";
+	case MFI_EVT_CLASS_CRITICAL:
+		return "CRIT";
+	case MFI_EVT_CLASS_FATAL:
+		return "FATAL";
+	case MFI_EVT_CLASS_DEAD:
+		return "DEAD";
+	default:
+		snprintf(buffer, sizeof(buffer), "%d", class);
+		return buffer;
+	}
+}
+
+/**
+ * megasas_decode_evt -	Decode FW AEN event and print critical events
+ * @instance:			Adapter soft state
+ */
+static void
+megasas_decode_evt(struct megasas_instance *instance)
+{
+	struct megasas_evt_detail *evt_detail = instance->evt_detail;
+	union megasas_evt_class_locale class_locale;
+
+	class_locale.word = le32_to_cpu(evt_detail->cl.word);
+
+	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
+		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
+			le32_to_cpu(evt_detail->seq_num),
+			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
+			(class_locale.members.locale),
+			format_class(class_locale.members.class),
+			evt_detail->description);
+}
+
+/*
+ * The following functions are defined for xscale
+ * (deviceid : 1064R, PERC5) controllers
+ */
+
+/**
+ * megasas_enable_intr_xscale -	Enables interrupts
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_enable_intr_xscale(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+
+	regs = instance->reg_set;
+	writel(0, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_xscale -Disables interrupt
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_disable_intr_xscale(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+	u32 mask = 0x1f;
+
+	regs = instance->reg_set;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_msg_0);
+}
+
+/**
+ * megasas_clear_intr_xscale -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_xscale(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	u32 mfiStatus = 0;
+
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_OB_INTR_STATUS_MASK)
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	if (mfiStatus)
+		writel(status, &regs->outbound_intr_status);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_status);
+
+	return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_xscale -	Sends command to the FW
+ * @instance :			Adapter soft state
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
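+	/*
+	 * MFI frames are 64-byte aligned, so after the >>3 shift the low
+	 * bits of the address are zero and are free to carry the frame
+	 * count alongside the address in a single register write.
+	 */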
+	writel((frame_phys_addr >> 3)|(frame_count),
+	       &(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_xscale -  For controller reset
+ * @instance:                          Adapter soft state
+ * @regs:                              MFI register set
+ */
+static int
+megasas_adp_reset_xscale(struct megasas_instance *instance,
+	struct megasas_register_set __iomem *regs)
+{
+	u32 i;
+	u32 pcidata;
+
+	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
+
+	for (i = 0; i < 3; i++)
+		msleep(1000); /* sleep for 3 secs */
+	pcidata  = 0;
+	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
+	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
+	if (pcidata & 0x2) {
+		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
+		pcidata &= ~0x2;
+		pci_write_config_dword(instance->pdev,
+				MFI_1068_PCSR_OFFSET, pcidata);
+
+		for (i = 0; i < 2; i++)
+			msleep(1000); /* need to wait 2 secs again */
+
+		pcidata  = 0;
+		pci_read_config_dword(instance->pdev,
+				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
+		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
+		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
+			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
+			pcidata = 0;
+			pci_write_config_dword(instance->pdev,
+				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
+		}
+	}
+	return 0;
+}
+
+/**
+ * megasas_check_reset_xscale -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_xscale(struct megasas_instance *instance,
+		struct megasas_register_set __iomem *regs)
+{
+	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
+	    (le32_to_cpu(*instance->consumer) ==
+		MEGASAS_ADPRESET_INPROG_SIGN))
+		return 1;
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+	.fire_cmd = megasas_fire_cmd_xscale,
+	.enable_intr = megasas_enable_intr_xscale,
+	.disable_intr = megasas_disable_intr_xscale,
+	.clear_intr = megasas_clear_intr_xscale,
+	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+	.adp_reset = megasas_adp_reset_xscale,
+	.check_reset = megasas_check_reset_xscale,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+/*
+ * This is the end of the set of functions & definitions specific
+ * to xscale (deviceid : 1064R, PERC5) controllers
+ */
+
+/*
+ * The following functions are defined for ppc (deviceid : 0x60)
+ * controllers
+ */
+
+/**
+ * megasas_enable_intr_ppc -	Enables interrupts
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_enable_intr_ppc(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+
+	regs = instance->reg_set;
+	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+	writel(~0x80000000, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_ppc -	Disable interrupt
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_disable_intr_ppc(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+	u32 mask = 0xFFFFFFFF;
+
+	regs = instance->reg_set;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_ppc - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_ppc -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_ppc(struct megasas_register_set __iomem *regs)
+{
+	u32 status, mfiStatus = 0;
+
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	writel(status, &regs->outbound_doorbell_clear);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_doorbell_clear);
+
+	return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_ppc -	Sends command to the FW
+ * @instance :			Adapter soft state
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+			&(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_ppc -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_ppc(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *regs)
+{
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+		return 1;
+
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_ppc = {
+
+	.fire_cmd = megasas_fire_cmd_ppc,
+	.enable_intr = megasas_enable_intr_ppc,
+	.disable_intr = megasas_disable_intr_ppc,
+	.clear_intr = megasas_clear_intr_ppc,
+	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
+	.adp_reset = megasas_adp_reset_xscale,
+	.check_reset = megasas_check_reset_ppc,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+ * megasas_enable_intr_skinny -	Enables interrupts
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+
+	regs = instance->reg_set;
+	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny -	Disables interrupt
+ * @instance:			Adapter soft state
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+	u32 mask = 0xFFFFFFFF;
+
+	regs = instance->reg_set;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_skinny -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	u32 mfiStatus = 0;
+
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK))
+		return 0;
+
+	/*
+	 * Distinguish a FW fault from a normal reply interrupt
+	 */
+	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+	    MFI_STATE_FAULT)
+		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+	else
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	writel(status, &regs->outbound_intr_status);
+
+	/*
+	 * dummy read to flush PCI
+	 */
+	readl(&regs->outbound_intr_status);
+
+	return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_skinny -	Sends command to the FW
+ * @instance :			Adapter soft state
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
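+	/*
+	 * The 64-bit frame address is split across two 32-bit ports; the
+	 * high half is written first since the write to the low port is
+	 * what posts the command.
+	 */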
+	writel(upper_32_bits(frame_phys_addr),
+	       &(regs)->inbound_high_queue_port);
+	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+	       &(regs)->inbound_low_queue_port);
+	mmiowb();
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_skinny -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_skinny(struct megasas_instance *instance,
+				struct megasas_register_set __iomem *regs)
+{
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+		return 1;
+
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+	.fire_cmd = megasas_fire_cmd_skinny,
+	.enable_intr = megasas_enable_intr_skinny,
+	.disable_intr = megasas_disable_intr_skinny,
+	.clear_intr = megasas_clear_intr_skinny,
+	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+	.adp_reset = megasas_adp_reset_gen2,
+	.check_reset = megasas_check_reset_skinny,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+
+/*
+ * The following functions are defined for gen2 (deviceid : 0x78, 0x79)
+ * controllers
+ */
+
+/**
+ * megasas_enable_intr_gen2 -  Enables interrupts
+ * @instance:                  Adapter soft state
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+
+	regs = instance->reg_set;
+	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+	/* unmask doorbell (0x4) and reply (0x1) interrupts: write ~0x00000005 */
+	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @instance:                  Adapter soft state
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+	u32 mask = 0xFFFFFFFF;
+
+	regs = instance->reg_set;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs:                      MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_gen2 -      Check & clear interrupt
+ * @regs:                              MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	u32 mfiStatus = 0;
+
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_INTR_FLAG_REPLY_MESSAGE)
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	if (mfiStatus)
+		writel(status, &regs->outbound_doorbell_clear);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_status);
+
+	return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_gen2 -     Sends command to the FW
+ * @instance :                 Adapter soft state
+ * @frame_phys_addr :          Physical address of cmd
+ * @frame_count :              Number of frames for the command
+ * @regs :                     MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+			&(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_gen2 -	For controller reset
+ * @instance:				Adapter soft state
+ * @reg_set:				MFI register set
+ */
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *reg_set)
+{
+	u32 retry = 0;
+	u32 HostDiag;
+	u32 __iomem *seq_offset = &reg_set->seq_offset;
+	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
+
+	if (instance->instancet == &megasas_instance_template_skinny) {
+		seq_offset = &reg_set->fusion_seq_offset;
+		hostdiag_offset = &reg_set->fusion_host_diag;
+	}
+
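+	/* write the magic key sequence that unlocks the host diagnostic register */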
+	writel(0, seq_offset);
+	writel(4, seq_offset);
+	writel(0xb, seq_offset);
+	writel(2, seq_offset);
+	writel(7, seq_offset);
+	writel(0xd, seq_offset);
+
+	msleep(1000);
+
+	HostDiag = (u32)readl(hostdiag_offset);
+
+	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
+		msleep(100);
+		HostDiag = (u32)readl(hostdiag_offset);
+		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
+					retry, HostDiag);
+
+		if (retry++ >= 100)
+			return 1;
+
+	}
+
+	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+
+	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
+
+	ssleep(10);
+
+	HostDiag = (u32)readl(hostdiag_offset);
+	while (HostDiag & DIAG_RESET_ADAPTER) {
+		msleep(100);
+		HostDiag = (u32)readl(hostdiag_offset);
+		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
+				retry, HostDiag);
+
+		if (retry++ >= 1000)
+			return 1;
+
+	}
+	return 0;
+}
+
+/**
+ * megasas_check_reset_gen2 -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_gen2(struct megasas_instance *instance,
+		struct megasas_register_set __iomem *regs)
+{
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+		return 1;
+
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+	.fire_cmd = megasas_fire_cmd_gen2,
+	.enable_intr = megasas_enable_intr_gen2,
+	.disable_intr = megasas_disable_intr_gen2,
+	.clear_intr = megasas_clear_intr_gen2,
+	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+	.adp_reset = megasas_adp_reset_gen2,
+	.check_reset = megasas_check_reset_gen2,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+/*
+ * This is the end of the set of functions & definitions
+ * specific to gen2 (deviceid : 0x78, 0x79) controllers
+ */
+
+/*
+ * Template added for TB (Fusion)
+ */
+extern struct megasas_instance_template megasas_instance_template_fusion;
+
+/**
+ * megasas_issue_polled -	Issues a polling command
+ * @instance:			Adapter soft state
+ * @cmd:			Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to
+ * MFI_STAT_INVALID_STATUS before posting.
+ */
+int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
+	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+			__func__, __LINE__);
+		return DCMD_NOT_FIRED;
+	}
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	return wait_and_poll(instance, cmd, instance->requestorId ?
+			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
+}
+
+/**
+ * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be issued
+ * @timeout:			Timeout in seconds
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ * Used to issue ioctl commands.
+ */
+int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+			  struct megasas_cmd *cmd, int timeout)
+{
+	int ret = 0;
+
+	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+			__func__, __LINE__);
+		return DCMD_NOT_FIRED;
+	}
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	if (timeout) {
+		ret = wait_event_timeout(instance->int_cmd_wait_q,
+				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+		if (!ret) {
+			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
+				__func__, __LINE__);
+			return DCMD_TIMEOUT;
+		}
+	} else
+		wait_event(instance->int_cmd_wait_q,
+				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+
+	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+		DCMD_SUCCESS : DCMD_FAILED;
+}
+
+/**
+ * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
+ * @instance:				Adapter soft state
+ * @cmd_to_abort:			Previously issued cmd to be aborted
+ * @timeout:				Timeout in seconds
+ *
+ * MFI firmware can abort a previously issued AEN command (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ * cmd and waits for return status.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+				struct megasas_cmd *cmd_to_abort, int timeout)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_abort_frame *abort_fr;
+	int ret = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -1;
+
+	abort_fr = &cmd->frame->abort;
+
+	/*
+	 * Prepare and issue the abort frame
+	 */
+	abort_fr->cmd = MFI_CMD_ABORT;
+	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
+	abort_fr->flags = cpu_to_le16(0);
+	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+	abort_fr->abort_mfi_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+	abort_fr->abort_mfi_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+
+	cmd->sync_cmd = 1;
+	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+			__func__, __LINE__);
+		return DCMD_NOT_FIRED;
+	}
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	if (timeout) {
+		ret = wait_event_timeout(instance->abort_cmd_wait_q,
+				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
+		if (!ret) {
+			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
+				__func__, __LINE__);
+			return DCMD_TIMEOUT;
+		}
+	} else
+		wait_event(instance->abort_cmd_wait_q,
+				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
+
+	cmd->sync_cmd = 0;
+
+	megasas_return_cmd(instance, cmd);
+	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+		DCMD_SUCCESS : DCMD_FAILED;
+}
+
+/**
+ * megasas_make_sgl32 -	Prepares 32-bit SGL
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @mfi_sgl:		SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+	BUG_ON(sge_count < 0);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
+		}
+	}
+	return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 -	Prepares 64-bit SGL
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @mfi_sgl:		SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+	BUG_ON(sge_count < 0);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
+		}
+	}
+	return sge_count;
+}
+
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance:           Adapter soft state
+ * @scp:                SCSI command from the mid-layer
+ * @mfi_sgl:            SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge_skinny[i].length =
+				cpu_to_le32(sg_dma_len(os_sgl));
+			mfi_sgl->sge_skinny[i].phys_addr =
+				cpu_to_le64(sg_dma_address(os_sgl));
+			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
+		}
+	}
+	return sge_count;
+}
+
+/**
+ * megasas_get_frame_count - Computes the number of frames
+ * @instance		: Adapter soft state
+ * @sge_count		: number of sg elements
+ * @frame_type		: type of frame - io or pthru frame
+ *
+ * Returns the number of frames required for the given number of SGEs
+ * (sge_count)
+ */
+
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+			u8 sge_count, u8 frame_type)
+{
+	int num_cnt;
+	int sge_bytes;
+	u32 sge_sz;
+	u32 frame_count = 0;
+
+	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+	    sizeof(struct megasas_sge32);
+
+	if (instance->flag_ieee)
+		sge_sz = sizeof(struct megasas_sge_skinny);
+
+	/*
+	 * Main frame can contain 2 SGEs for 64-bit SGLs and
+	 * 3 SGEs for 32-bit SGLs for ldio &
+	 * 1 SGEs for 64-bit SGLs and
+	 * 2 SGEs for 32-bit SGLs for pthru frame
+	 */
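+	/*
+	 * Worked example (64-bit SGLs): an ldio whose main frame holds 2
+	 * SGEs with sge_count = 10 leaves num_cnt = 8 SGEs; those occupy
+	 * ceil(8 * sge_sz / MEGAMFI_FRAME_SIZE) extra frames, plus 1 for
+	 * the main frame itself.
+	 */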
+	if (unlikely(frame_type == PTHRU_FRAME)) {
+		if (instance->flag_ieee == 1 || IS_DMA64)
+			num_cnt = sge_count - 1;
+		else
+			num_cnt = sge_count - 2;
+	} else {
+		if (instance->flag_ieee == 1)
+			num_cnt = sge_count - 1;
+		else if (IS_DMA64)
+			num_cnt = sge_count - 2;
+		else
+			num_cnt = sge_count - 3;
+	}
+
+	if (num_cnt > 0) {
+		sge_bytes = sge_sz * num_cnt;
+
+		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
+	}
+	/* Main frame */
+	frame_count += 1;
+
+	if (frame_count > 7)
+		frame_count = 8;
+	return frame_count;
+}
+
+/**
+ * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   struct megasas_cmd *cmd)
+{
+	u32 is_logical;
+	u32 device_id;
+	u16 flags = 0;
+	struct megasas_pthru_frame *pthru;
+
+	is_logical = MEGASAS_IS_LOGICAL(scp->device);
+	device_id = MEGASAS_DEV_INDEX(scp);
+	pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		flags = MFI_FRAME_DIR_WRITE;
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		flags = MFI_FRAME_DIR_READ;
+	else if (scp->sc_data_direction == PCI_DMA_NONE)
+		flags = MFI_FRAME_DIR_NONE;
+
+	if (instance->flag_ieee == 1)
+		flags |= MFI_FRAME_IEEE;
+
+	/*
+	 * Prepare the DCDB frame
+	 */
+	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+	pthru->cmd_status = 0x0;
+	pthru->scsi_status = 0x0;
+	pthru->target_id = device_id;
+	pthru->lun = scp->device->lun;
+	pthru->cdb_len = scp->cmd_len;
+	pthru->timeout = 0;
+	pthru->pad_0 = 0;
+	pthru->flags = cpu_to_le16(flags);
+	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
+
+	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+	/*
+	 * If the command is for the tape device, set the
+	 * pthru timeout to the os layer timeout value.
+	 */
+	if (scp->device->type == TYPE_TAPE) {
+		if ((scp->request->timeout / HZ) > 0xFFFF)
+			pthru->timeout = cpu_to_le16(0xFFFF);
+		else
+			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+	}
+
+	/*
+	 * Construct SGL
+	 */
+	if (instance->flag_ieee == 1) {
+		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+						      &pthru->sgl);
+	} else if (IS_DMA64) {
+		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+		pthru->sge_count = megasas_make_sgl64(instance, scp,
+						      &pthru->sgl);
+	} else
+		pthru->sge_count = megasas_make_sgl32(instance, scp,
+						      &pthru->sgl);
+
+	if (pthru->sge_count > instance->max_num_sge) {
+		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
+			pthru->sge_count);
+		return 0;
+	}
+
+	/*
+	 * Sense info specific
+	 */
+	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+	pthru->sense_buf_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+	pthru->sense_buf_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
+
+	/*
+	 * Compute the total number of frames this command consumes. FW uses
+	 * this number to pull sufficient number of frames from host memory.
+	 */
+	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
+							PTHRU_FRAME);
+
+	return cmd->frame_count;
+}
+
+/**
+ * megasas_build_ldio -	Prepares IOs to logical devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+ */
+static int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   struct megasas_cmd *cmd)
+{
+	u32 device_id;
+	u8 sc = scp->cmnd[0];
+	u16 flags = 0;
+	struct megasas_io_frame *ldio;
+
+	device_id = MEGASAS_DEV_INDEX(scp);
+	ldio = (struct megasas_io_frame *)cmd->frame;
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		flags = MFI_FRAME_DIR_WRITE;
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		flags = MFI_FRAME_DIR_READ;
+
+	if (instance->flag_ieee == 1)
+		flags |= MFI_FRAME_IEEE;
+
+	/*
+	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+	 */
+	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+	ldio->cmd_status = 0x0;
+	ldio->scsi_status = 0x0;
+	ldio->target_id = device_id;
+	ldio->timeout = 0;
+	ldio->reserved_0 = 0;
+	ldio->pad_0 = 0;
+	ldio->flags = cpu_to_le16(flags);
+	ldio->start_lba_hi = 0;
+	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+	/*
+	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
+	 */
+	if (scp->cmd_len == 6) {
+		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+						 ((u32) scp->cmnd[2] << 8) |
+						 (u32) scp->cmnd[3]);
+
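+		/* A 6-byte CDB carries only a 21-bit LBA, hence the mask */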
+		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
+	}
+
+	/*
+	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
+	 */
+	else if (scp->cmd_len == 10) {
+		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+					      ((u32) scp->cmnd[7] << 8));
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
+	}
+
+	/*
+	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+	 */
+	else if (scp->cmd_len == 12) {
+		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+					      ((u32) scp->cmnd[7] << 16) |
+					      ((u32) scp->cmnd[8] << 8) |
+					      (u32) scp->cmnd[9]);
+
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
+	}
+
+	/*
+	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
+	 */
+	else if (scp->cmd_len == 16) {
+		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+					      ((u32) scp->cmnd[11] << 16) |
+					      ((u32) scp->cmnd[12] << 8) |
+					      (u32) scp->cmnd[13]);
+
+		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+						 ((u32) scp->cmnd[7] << 16) |
+						 ((u32) scp->cmnd[8] << 8) |
+						 (u32) scp->cmnd[9]);
+
+		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+						 ((u32) scp->cmnd[3] << 16) |
+						 ((u32) scp->cmnd[4] << 8) |
+						 (u32) scp->cmnd[5]);
+
+	}
+
+	/*
+	 * Construct SGL
+	 */
+	if (instance->flag_ieee) {
+		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+					      &ldio->sgl);
+	} else if (IS_DMA64) {
+		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+	} else
+		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+	if (ldio->sge_count > instance->max_num_sge) {
+		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
+			ldio->sge_count);
+		return 0;
+	}
+
+	/*
+	 * Sense info specific
+	 */
+	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+	ldio->sense_buf_phys_addr_hi = 0;
+	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
+
+	/*
+	 * Compute the total number of frames this command consumes. FW uses
+	 * this number to pull sufficient number of frames from host memory.
+	 */
+	cmd->frame_count = megasas_get_frame_count(instance,
+			ldio->sge_count, IO_FRAME);
+
+	return cmd->frame_count;
+}
+
+/**
+ * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
+ *				and whether it's RW or non RW
+ * @cmd:			SCSI command
+ *
+ */
+inline int megasas_cmd_type(struct scsi_cmnd *cmd)
+{
+	int ret;
+
+	switch (cmd->cmnd[0]) {
+	case READ_10:
+	case WRITE_10:
+	case READ_12:
+	case WRITE_12:
+	case READ_6:
+	case WRITE_6:
+	case READ_16:
+	case WRITE_16:
+		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
+			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
+		break;
+	default:
+		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
+			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
+	}
+	return ret;
+}
+
+/**
+ * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
+ *					in FW
+ * @instance:				Adapter soft state
+ */
+static inline void
+megasas_dump_pending_frames(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	int i, n;
+	union megasas_sgl *mfi_sgl;
+	struct megasas_io_frame *ldio;
+	struct megasas_pthru_frame *pthru;
+	u32 sgcount;
+	u16 max_cmd = instance->max_fw_cmds;
+
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+	if (IS_DMA64)
+		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+	else
+		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+
+	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		if (!cmd->scmd)
+			continue;
+		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
+			ldio = (struct megasas_io_frame *)cmd->frame;
+			mfi_sgl = &ldio->sgl;
+			sgcount = ldio->sge_count;
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
+		} else {
+			pthru = (struct megasas_pthru_frame *) cmd->frame;
+			mfi_sgl = &pthru->sgl;
+			sgcount = pthru->sge_count;
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
+		}
+		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+			for (n = 0; n < sgcount; n++) {
+				if (IS_DMA64)
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
+						le32_to_cpu(mfi_sgl->sge64[n].length),
+						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+				else
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
+						le32_to_cpu(mfi_sgl->sge32[n].length),
+						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+			}
+		}
+	} /*for max_cmd*/
+	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		if (cmd->sync_cmd == 1)
+			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
+	}
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
+}
+
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+			    struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd *cmd;
+	u32 frame_count;
+
+	cmd = megasas_get_cmd(instance);
+	if (!cmd)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 * Logical drive command
+	 */
+	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
+		frame_count = megasas_build_ldio(instance, scmd, cmd);
+	else
+		frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+	if (!frame_count)
+		goto out_return_cmd;
+
+	cmd->scmd = scmd;
+	scmd->SCp.ptr = (char *)cmd;
+
+	/*
+	 * Issue the command to the FW
+	 */
+	atomic_inc(&instance->fw_outstanding);
+
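+	/*
+	 * fire_cmd is passed the count of additional frames beyond the
+	 * main frame, hence frame_count - 1.
+	 */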
+	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+				cmd->frame_count-1, instance->reg_set);
+
+	return 0;
+out_return_cmd:
+	megasas_return_cmd(instance, cmd);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+
+/**
+ * megasas_queue_command -	Queue entry point
+ * @scmd:			SCSI command to be queued
+ * @done:			Callback entry point
+ */
+static int
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+	struct megasas_instance *instance;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+
+	instance = (struct megasas_instance *)
+	    scmd->device->host->hostdata;
+
+	if (instance->unload == 1) {
+		scmd->result = DID_NO_CONNECT << 16;
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	if (instance->issuepend_done == 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+
+	/* Check for an mpio path and adjust behavior */
+	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
+		if (megasas_check_mpio_paths(instance, scmd) ==
+		    (DID_REQUEUE << 16)) {
+			return SCSI_MLQUEUE_HOST_BUSY;
+		} else {
+			scmd->result = DID_NO_CONNECT << 16;
+			scmd->scsi_done(scmd);
+			return 0;
+		}
+	}
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		scmd->result = DID_NO_CONNECT << 16;
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	mr_device_priv_data = scmd->device->hostdata;
+	if (!mr_device_priv_data) {
+		scmd->result = DID_NO_CONNECT << 16;
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	if (mr_device_priv_data->tm_busy)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
+
+	scmd->result = 0;
+
+	if (MEGASAS_IS_LOGICAL(scmd->device) &&
+	    (scmd->device->id >= instance->fw_supported_vd_count ||
+		scmd->device->lun)) {
+		scmd->result = DID_BAD_TARGET << 16;
+		goto out_done;
+	}
+
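+	/*
+	 * When FW does not support SYNCHRONIZE_CACHE for LDs, it manages
+	 * cache flushing on its own, so complete the command here with
+	 * success instead of sending it down.
+	 */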
+	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
+	    MEGASAS_IS_LOGICAL(scmd->device) &&
+	    (!instance->fw_sync_cache_support)) {
+		scmd->result = DID_OK << 16;
+		goto out_done;
+	}
+
+	return instance->instancet->build_and_issue_cmd(instance, scmd);
+
+ out_done:
+	scmd->scsi_done(scmd);
+	return 0;
+}
+
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+	int i;
+
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+		if ((megasas_mgmt_info.instance[i]) &&
+		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+			return megasas_mgmt_info.instance[i];
+	}
+
+	return NULL;
+}
+
+/*
+ * megasas_set_dynamic_target_properties -
+ * Device properties set by the driver may not be static and need to be
+ * updated after OCR.
+ *
+ * set tm_capable.
+ * set dma alignment (only for eedp protection enabled vd).
+ *
+ * @sdev: OS provided scsi device
+ *
+ * Returns void
+ */
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
+{
+	u16 pd_index = 0, ld;
+	u32 device_id;
+	struct megasas_instance *instance;
+	struct fusion_context *fusion;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+	struct MR_LD_RAID *raid;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	fusion = instance->ctrl_context;
+	mr_device_priv_data = sdev->hostdata;
+
+	if (!fusion || !mr_device_priv_data)
+		return;
+
+	if (MEGASAS_IS_LOGICAL(sdev)) {
+		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+					+ sdev->id;
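+		/*
+		 * The driver keeps two copies of the RAID map; (map_id & 1)
+		 * selects the copy currently in sync with FW.
+		 */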
+		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+		if (ld >= instance->fw_supported_vd_count)
+			return;
+		raid = MR_LdRaidGet(ld, local_map_ptr);
+
+		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
+			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
+		mr_device_priv_data->is_tm_capable =
+			raid->capability.tmCapable;
+	} else if (instance->use_seqnum_jbod_fp) {
+		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
+		pd_sync = (void *)fusion->pd_seq_sync
+				[(instance->pd_seq_map_id - 1) & 1];
+		mr_device_priv_data->is_tm_capable =
+			pd_sync->seq[pd_index].capability.tmCapable;
+	}
+}
+
+/*
+ * megasas_set_nvme_device_properties -
+ * set nomerges=2
+ * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
+ * set maximum io transfer = MDTS of NVME device provided by MR firmware.
+ *
+ * MR firmware provides value in KB. Caller of this function converts
+ * KB into bytes.
+ *
+ * e.g. MDTS=5 means 2^5 * nvme page size (in case of 4K page size,
+ * MR firmware provides the value 128, as (32 * 4K) = 128K).
+ *
+ * @sdev:		scsi device
+ * @max_io_size:	maximum io transfer size
+ *
+ */
+static inline void
+megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
+{
+	struct megasas_instance *instance;
+	u32 mr_nvme_pg_size;
+
+	instance = (struct megasas_instance *)sdev->host->hostdata;
+	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+				MR_DEFAULT_NVME_PAGE_SIZE);
+
+	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
+
+	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
+}
+
+
+/*
+ * megasas_set_static_target_properties -
+ * Device properties set by the driver are static and do not need to be
+ * updated after OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev:		scsi device
+ * @is_target_prop:	true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+						 bool is_target_prop)
+{
+	u16	target_index = 0;
+	u8 interface_type;
+	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
+	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+	u32 tgt_device_qd;
+	struct megasas_instance *instance;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	mr_device_priv_data = sdev->hostdata;
+	interface_type  = mr_device_priv_data->interface_type;
+
+	/*
+	 * The RAID firmware may require extended timeouts.
+	 */
+	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
+
+	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+	switch (interface_type) {
+	case SAS_PD:
+		device_qd = MEGASAS_SAS_QD;
+		break;
+	case SATA_PD:
+		device_qd = MEGASAS_SATA_QD;
+		break;
+	case NVME_PD:
+		device_qd = MEGASAS_NVME_QD;
+		break;
+	}
+
+	if (is_target_prop) {
+		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
+		if (tgt_device_qd &&
+		    (tgt_device_qd <= instance->host->can_queue))
+			device_qd = tgt_device_qd;
+
+		/* max_io_size_kb will be set to non zero for
+		 * nvme based vd and syspd.
+		 */
+		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
+	}
+
+	if (instance->nvme_page_size && max_io_size_kb)
+		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
+
+	scsi_change_queue_depth(sdev, device_qd);
+
+}
+
+
+static int megasas_slave_configure(struct scsi_device *sdev)
+{
+	u16 pd_index = 0;
+	struct megasas_instance *instance;
+	int ret_target_prop = DCMD_FAILED;
+	bool is_target_prop = false;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	if (instance->pd_list_not_supported) {
+		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
+			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+				sdev->id;
+			if (instance->pd_list[pd_index].driveState !=
+				MR_PD_STATE_SYSTEM)
+				return -ENXIO;
+		}
+	}
+
+	mutex_lock(&instance->hba_mutex);
+	/* Send DCMD to Firmware and cache the information */
+	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
+		megasas_get_pd_info(instance, sdev);
+
+	/* Some Ventura firmware may not have instance->nvme_page_size set.
+	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
+	 */
+	if ((instance->tgt_prop) && (instance->nvme_page_size))
+		ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+	megasas_set_static_target_properties(sdev, is_target_prop);
+
+	mutex_unlock(&instance->hba_mutex);
+
+	/* This sdev property may change post OCR */
+	megasas_set_dynamic_target_properties(sdev);
+
+	return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+	u16 pd_index = 0;
+	struct megasas_instance *instance;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	if (!MEGASAS_IS_LOGICAL(sdev)) {
+		/*
+		 * Open the OS scan to the SYSTEM PD
+		 */
+		pd_index =
+			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
+		if ((instance->pd_list_not_supported ||
+			instance->pd_list[pd_index].driveState ==
+			MR_PD_STATE_SYSTEM)) {
+			goto scan_target;
+		}
+		return -ENXIO;
+	}
+
+scan_target:
+	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
+					GFP_KERNEL);
+	if (!mr_device_priv_data)
+		return -ENOMEM;
+	sdev->hostdata = mr_device_priv_data;
+
+	atomic_set(&mr_device_priv_data->r1_ldio_hint,
+		   instance->r1_ldio_hint_default);
+	return 0;
+}
+
+static void megasas_slave_destroy(struct scsi_device *sdev)
+{
+	kfree(sdev->hostdata);
+	sdev->hostdata = NULL;
+}
+
+/*
+ * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
+ *                                       kill adapter
+ * @instance:				Adapter soft state
+ *
+ */
+static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+{
+	int i;
+	struct megasas_cmd *cmd_mfi;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	/* Find all outstanding ioctls */
+	if (fusion) {
+		for (i = 0; i < instance->max_fw_cmds; i++) {
+			cmd_fusion = fusion->cmd_list[i];
+			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+				if (cmd_mfi->sync_cmd &&
+				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+					cmd_mfi->frame->hdr.cmd_status =
+							MFI_STAT_WRONG_STATE;
+					megasas_complete_cmd(instance,
+							     cmd_mfi, DID_OK);
+				}
+			}
+		}
+	} else {
+		for (i = 0; i < instance->max_fw_cmds; i++) {
+			cmd_mfi = instance->cmd_list[i];
+			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
+				MFI_CMD_ABORT)
+				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+		}
+	}
+}
+
+
+void megaraid_sas_kill_hba(struct megasas_instance *instance)
+{
+	/* Set critical error to block I/O & ioctls in case caller didn't */
+	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
+	/* Wait 1 second to ensure IO or ioctls in build have posted */
+	msleep(1000);
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+		(instance->adapter_type != MFI_SERIES)) {
+		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+		/* Flush */
+		readl(&instance->reg_set->doorbell);
+		if (instance->requestorId && instance->peerIsPresent)
+			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+	} else {
+		writel(MFI_STOP_ADP,
+			&instance->reg_set->inbound_doorbell);
+	}
+	/* Complete outstanding ioctls when adapter is killed */
+	megasas_complete_outstanding_ioctls(instance);
+}
+
+/**
+ * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
+ *					restored to max value
+ * @instance:			Adapter soft state
+ *
+ */
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
+{
+	unsigned long flags;
+
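+	/*
+	 * Restore the full queue depth once FW has been out of the busy
+	 * condition for 5 seconds and outstanding IOs have drained below
+	 * the throttled depth.
+	 */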
+	if (instance->flag & MEGASAS_FW_BUSY
+	    && time_after(jiffies, instance->last_time + 5 * HZ)
+	    && atomic_read(&instance->fw_outstanding) <
+	    instance->throttlequeuedepth + 1) {
+
+		spin_lock_irqsave(instance->host->host_lock, flags);
+		instance->flag &= ~MEGASAS_FW_BUSY;
+
+		instance->host->can_queue = instance->cur_can_queue;
+		spin_unlock_irqrestore(instance->host->host_lock, flags);
+	}
+}
+
+/**
+ * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
+ * @instance_addr:			Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+	u32 producer;
+	u32 consumer;
+	u32 context;
+	struct megasas_cmd *cmd;
+	struct megasas_instance *instance =
+				(struct megasas_instance *)instance_addr;
+	unsigned long flags;
+
+	/* If we have already declared adapter dead, do not complete cmds */
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	spin_lock_irqsave(&instance->completion_lock, flags);
+
+	producer = le32_to_cpu(*instance->producer);
+	consumer = le32_to_cpu(*instance->consumer);
+
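+	/*
+	 * The reply queue is a ring of max_fw_cmds + 1 entries. FW advances
+	 * the producer index; drain completions up to it and then publish
+	 * the producer value back as the new consumer index.
+	 */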
+	while (consumer != producer) {
+		context = le32_to_cpu(instance->reply_queue[consumer]);
+		if (context >= instance->max_fw_cmds) {
+			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
+				context);
+			BUG();
+		}
+
+		cmd = instance->cmd_list[context];
+
+		megasas_complete_cmd(instance, cmd, DID_OK);
+
+		consumer++;
+		if (consumer == (instance->max_fw_cmds + 1))
+			consumer = 0;
+	}
+
+	*instance->consumer = cpu_to_le32(producer);
+
+	spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+	/*
+	 * Check if we can restore can_queue
+	 */
+	megasas_check_and_restore_queue_depth(instance);
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance:		Adapter soft state
+ * @timer:		timer object to be initialized
+ * @fn:			timer function
+ * @interval:		time interval between timer function call
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			void *fn, unsigned long interval)
+{
+	init_timer(timer);
+	timer->expires = jiffies + interval;
+	timer->data = (unsigned long)instance;
+	timer->function = fn;
+	add_timer(timer);
+}
+
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
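+	/*
+	 * These early controllers signal a reset in progress through the
+	 * consumer index in host memory.
+	 */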
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+	}
+	instance->instancet->disable_intr(instance);
+	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
+	instance->issuepend_done = 0;
+
+	atomic_set(&instance->fw_outstanding, 0);
+	megasas_internal_reset_defer_cmds(instance);
+	process_fw_state_change_wq(&instance->work_init);
+}
+
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+					    int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+	dma_addr_t new_affiliation_111_h;
+	int ld, retval = 0;
+	u8 thisVf;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
+		       "Failed to get cmd for scsi%d\n",
+			instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (!instance->vf_affiliation_111) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	if (initial)
+		memset(instance->vf_affiliation_111, 0,
+		       sizeof(struct MR_LD_VF_AFFILIATION_111));
+	else {
+		new_affiliation_111 =
+			pci_alloc_consistent(instance->pdev,
+					     sizeof(struct MR_LD_VF_AFFILIATION_111),
+					     &new_affiliation_111_h);
+		if (!new_affiliation_111) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d\n",
+			       instance->host->host_no);
+			megasas_return_cmd(instance, cmd);
+			return -ENOMEM;
+		}
+		memset(new_affiliation_111, 0,
+		       sizeof(struct MR_LD_VF_AFFILIATION_111));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len =
+		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
+
+	if (initial)
+		dcmd->sgl.sge32[0].phys_addr =
+			cpu_to_le32(instance->vf_affiliation_111_h);
+	else
+		dcmd->sgl.sge32[0].phys_addr =
+			cpu_to_le32(new_affiliation_111_h);
+
+	dcmd->sgl.sge32[0].length = cpu_to_le32(
+		sizeof(struct MR_LD_VF_AFFILIATION_111));
+
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
+	       "scsi%d\n", instance->host->host_no);
+
+	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d\n",
+		       dcmd->cmd_status, instance->host->host_no);
+		retval = 1; /* Do a scan if we couldn't get affiliation */
+		goto out;
+	}
+
+	if (!initial) {
+		thisVf = new_affiliation_111->thisVf;
+		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+			    new_affiliation_111->map[ld].policy[thisVf]) {
+				dev_warn(&instance->pdev->dev, "SR-IOV: "
+				       "Got new LD/VF affiliation for scsi%d\n",
+				       instance->host->host_no);
+				memcpy(instance->vf_affiliation_111,
+				       new_affiliation_111,
+				       sizeof(struct MR_LD_VF_AFFILIATION_111));
+				retval = 1;
+				goto out;
+			}
+	}
+out:
+	if (new_affiliation_111) {
+		pci_free_consistent(instance->pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    new_affiliation_111,
+				    new_affiliation_111_h);
+	}
+
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+					    int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+	dma_addr_t new_affiliation_h;
+	int i, j, retval = 0, found = 0, doscan = 0;
+	u8 thisVf;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
+		       "Failed to get cmd for scsi%d\n",
+		       instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (!instance->vf_affiliation) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	if (initial)
+		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+		       sizeof(struct MR_LD_VF_AFFILIATION));
+	else {
+		new_affiliation =
+			pci_alloc_consistent(instance->pdev,
+					     (MAX_LOGICAL_DRIVES + 1) *
+					     sizeof(struct MR_LD_VF_AFFILIATION),
+					     &new_affiliation_h);
+		if (!new_affiliation) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d\n",
+			       instance->host->host_no);
+			megasas_return_cmd(instance, cmd);
+			return -ENOMEM;
+		}
+		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+		       sizeof(struct MR_LD_VF_AFFILIATION));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
+		sizeof(struct MR_LD_VF_AFFILIATION));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
+
+	if (initial)
+		dcmd->sgl.sge32[0].phys_addr =
+			cpu_to_le32(instance->vf_affiliation_h);
+	else
+		dcmd->sgl.sge32[0].phys_addr =
+			cpu_to_le32(new_affiliation_h);
+
+	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
+		sizeof(struct MR_LD_VF_AFFILIATION));
+
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
+	       "scsi%d\n", instance->host->host_no);
+
+
+	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d\n",
+		       dcmd->cmd_status, instance->host->host_no);
+		retval = 1; /* Do a scan if we couldn't get affiliation */
+		goto out;
+	}
+
+	if (!initial) {
+		if (!new_affiliation->ldCount) {
+			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+			       "affiliation for passive path for scsi%d\n",
+			       instance->host->host_no);
+			retval = 1;
+			goto out;
+		}
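+		/*
+		 * Walk the old and new maps in both directions: a policy
+		 * change for this VF, or an LD present in one map but not
+		 * the other (and not hidden), means the affiliation changed
+		 * and a rescan is needed.
+		 */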
+		newmap = new_affiliation->map;
+		savedmap = instance->vf_affiliation->map;
+		thisVf = new_affiliation->thisVf;
+		for (i = 0 ; i < new_affiliation->ldCount; i++) {
+			found = 0;
+			for (j = 0; j < instance->vf_affiliation->ldCount;
+			     j++) {
+				if (newmap->ref.targetId ==
+				    savedmap->ref.targetId) {
+					found = 1;
+					if (newmap->policy[thisVf] !=
+					    savedmap->policy[thisVf]) {
+						doscan = 1;
+						goto out;
+					}
+				}
+				savedmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)savedmap +
+					 savedmap->size);
+			}
+			if (!found && newmap->policy[thisVf] !=
+			    MR_LD_ACCESS_HIDDEN) {
+				doscan = 1;
+				goto out;
+			}
+			newmap = (struct MR_LD_VF_MAP *)
+				((unsigned char *)newmap + newmap->size);
+		}
+
+		newmap = new_affiliation->map;
+		savedmap = instance->vf_affiliation->map;
+
+		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+			found = 0;
+			for (j = 0 ; j < new_affiliation->ldCount; j++) {
+				if (savedmap->ref.targetId ==
+				    newmap->ref.targetId) {
+					found = 1;
+					if (savedmap->policy[thisVf] !=
+					    newmap->policy[thisVf]) {
+						doscan = 1;
+						goto out;
+					}
+				}
+				newmap = (struct MR_LD_VF_MAP *)
+					((unsigned char *)newmap +
+					 newmap->size);
+			}
+			if (!found && savedmap->policy[thisVf] !=
+			    MR_LD_ACCESS_HIDDEN) {
+				doscan = 1;
+				goto out;
+			}
+			savedmap = (struct MR_LD_VF_MAP *)
+				((unsigned char *)savedmap +
+				 savedmap->size);
+		}
+	}
+out:
+	if (doscan) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
+		memcpy(instance->vf_affiliation, new_affiliation,
+		       new_affiliation->size);
+		retval = 1;
+	}
+
+	if (new_affiliation)
+		pci_free_consistent(instance->pdev,
+				    (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    new_affiliation, new_affiliation_h);
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+	int initial)
+{
+	int retval;
+
+	if (instance->PlasmaFW111)
+		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+	else
+		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
+	return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+					 int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	int retval = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
+		       "Failed to get cmd for scsi%d\n",
+		       instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (initial) {
+		instance->hb_host_mem =
+			pci_zalloc_consistent(instance->pdev,
+					      sizeof(struct MR_CTRL_HB_HOST_MEM),
+					      &instance->hb_host_mem_h);
+		if (!instance->hb_host_mem) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
+			       " memory for heartbeat host memory for scsi%d\n",
+			       instance->host->host_no);
+			retval = -ENOMEM;
+			goto out;
+		}
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+
+	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
+	       instance->host->host_no);
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		retval = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_ROUTINE_WAIT_TIME_VF);
+	else
+		retval = megasas_issue_polled(instance, cmd);
+
+	if (retval) {
+		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+			"_MEM_ALLOC DCMD %s for scsi%d\n",
+			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
+			"timed out" : "failed", instance->host->host_no);
+		retval = 1;
+	}
+
+out:
+	megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+
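+	/*
+	 * FW increments fwCounter while healthy; mirror it into
+	 * driverCounter and re-arm the timer. If FW stopped counting,
+	 * schedule the recovery work.
+	 */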
+	if (instance->hb_host_mem->HB.fwCounter !=
+	    instance->hb_host_mem->HB.driverCounter) {
+		instance->hb_host_mem->HB.driverCounter =
+			instance->hb_host_mem->HB.fwCounter;
+		mod_timer(&instance->sriov_heartbeat_timer,
+			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+	} else {
+		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
+		       "completed for scsi%d\n", instance->host->host_no);
+		schedule_work(&instance->work_init);
+	}
+}
+
+/**
+ * megasas_wait_for_outstanding -	Wait for all outstanding cmds
+ * @instance:				Adapter soft state
+ *
+ * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
+ * complete all its outstanding commands. Returns error if one or more IOs
+ * are pending after this time period. It also marks the controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+	int i, sl, outstanding;
+	u32 reset_index;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+	unsigned long flags;
+	struct list_head clist_local;
+	struct megasas_cmd *reset_cmd;
+	u32 fw_state;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
+		__func__, __LINE__);
+		return FAILED;
+	}
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+
+		INIT_LIST_HEAD(&clist_local);
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		list_splice_init(&instance->internal_reset_pending_q,
+				&clist_local);
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
+		for (i = 0; i < wait_time; i++) {
+			msleep(1000);
+			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
+				break;
+		}
+
+		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
+			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
+			return FAILED;
+		}
+
+		reset_index = 0;
+		while (!list_empty(&clist_local)) {
+			reset_cmd = list_entry((&clist_local)->next,
+						struct megasas_cmd, list);
+			list_del_init(&reset_cmd->list);
+			if (reset_cmd->scmd) {
+				reset_cmd->scmd->result = DID_REQUEUE << 16;
+				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
+					reset_index, reset_cmd,
+					reset_cmd->scmd->cmnd[0]);
+
+				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+				megasas_return_cmd(instance, reset_cmd);
+			} else if (reset_cmd->sync_cmd) {
+				dev_notice(&instance->pdev->dev, "%p synch cmds"
+						"reset queue\n",
+						reset_cmd);
+
+				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+				instance->instancet->fire_cmd(instance,
+						reset_cmd->frame_phys_addr,
+						0, instance->reg_set);
+			} else {
+				dev_notice(&instance->pdev->dev, "%p unexpected"
+					"cmds lst\n",
+					reset_cmd);
+			}
+			reset_index++;
+		}
+
+		return SUCCESS;
+	}
+
+	for (i = 0; i < resetwaittime; i++) {
+		outstanding = atomic_read(&instance->fw_outstanding);
+
+		if (!outstanding)
+			break;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
+			       "commands to complete\n",i,outstanding);
+			/*
+			 * Call the cmd completion routine. Cmds are
+			 * completed directly without depending on the isr.
+			 */
+			megasas_complete_cmd_dpc((unsigned long)instance);
+		}
+
+		msleep(1000);
+	}
+
+	i = 0;
+	outstanding = atomic_read(&instance->fw_outstanding);
+	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+
+	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+		goto no_outstanding;
+
+	if (instance->disableOnlineCtrlReset)
+		goto kill_hba_and_failed;
+	do {
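+		/*
+		 * Attempt up to three OCRs; if the FW is still faulted or
+		 * IOs remain outstanding after that, kill the adapter.
+		 */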
+		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
+			dev_info(&instance->pdev->dev,
+				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
+			if (i == 3)
+				goto kill_hba_and_failed;
+			megasas_do_ocr(instance);
+
+			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
+				__func__, __LINE__);
+				return FAILED;
+			}
+			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
+				__func__, __LINE__);
+
+			for (sl = 0; sl < 10; sl++)
+				msleep(500);
+
+			outstanding = atomic_read(&instance->fw_outstanding);
+
+			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+				goto no_outstanding;
+		}
+		i++;
+	} while (i <= 3);
+
+no_outstanding:
+
+	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
+		__func__, __LINE__);
+	return SUCCESS;
+
+kill_hba_and_failed:
+
+	/* Reset not supported, kill adapter */
+	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
+		" disableOnlineCtrlReset %d fw_outstanding %d \n",
+		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
+		atomic_read(&instance->fw_outstanding));
+	megasas_dump_pending_frames(instance);
+	megaraid_sas_kill_hba(instance);
+
+	return FAILED;
+}
+
+/**
+ * megasas_generic_reset -	Generic reset routine
+ * @scmd:			Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+	int ret_val;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
+		 scmd->cmnd[0], scmd->retries);
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
+		return FAILED;
+	}
+
+	ret_val = megasas_wait_for_outstanding(instance);
+	if (ret_val == SUCCESS)
+		dev_notice(&instance->pdev->dev, "reset successful\n");
+	else
+		dev_err(&instance->pdev->dev, "failed to do reset\n");
+
+	return ret_val;
+}
+
+/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd:		scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum blk_eh_timer_return
+megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+	struct megasas_instance *instance;
+	unsigned long flags;
+
+	if (time_after(jiffies, scmd->jiffies_at_alloc +
+				(scmd_timeout * 2) * HZ)) {
+		return BLK_EH_NOT_HANDLED;
+	}
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+	if (!(instance->flag & MEGASAS_FW_BUSY)) {
+		/* FW appears busy; set the busy flag and throttle IO */
+		spin_lock_irqsave(instance->host->host_lock, flags);
+
+		instance->host->can_queue = instance->throttlequeuedepth;
+		instance->last_time = jiffies;
+		instance->flag |= MEGASAS_FW_BUSY;
+
+		spin_unlock_irqrestore(instance->host->host_lock, flags);
+	}
+	return BLK_EH_RESET_TIMER;
+}
+
+/**
+ * megasas_dump_frame -	This function will dump MPT/MFI frame
+ * @mpi_request:	frame to dump
+ * @sz:			size of the frame in bytes
+ */
+static inline void
+megasas_dump_frame(void *mpi_request, int sz)
+{
+	int i;
+	__le32 *mfp = (__le32 *)mpi_request;
+
+	printk(KERN_INFO "IO request frame:\n\t");
+	for (i = 0; i < sz / sizeof(__le32); i++) {
+		if (i && ((i % 8) == 0))
+			printk("\n\t");
+		printk("%08x ", le32_to_cpu(mfp[i]));
+	}
+	printk("\n");
+}
+
+/**
+ * megasas_reset_bus_host -	Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	scmd_printk(KERN_INFO, scmd,
+		"Controller reset is requested due to IO timeout\n"
+		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
+		" SCSI host busy: %d\t FW outstanding: %d\n",
+		scmd, scmd->device->host->shost_state,
+		atomic_read((atomic_t *)&scmd->device->host->host_busy),
+		atomic_read(&instance->fw_outstanding));
+
+	/*
+	 * First wait for all commands to complete
+	 */
+	if (instance->adapter_type == MFI_SERIES) {
+		ret = megasas_generic_reset(scmd);
+	} else {
+		struct megasas_cmd_fusion *cmd;
+		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+		if (cmd)
+			megasas_dump_frame(cmd->io_request,
+				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+		ret = megasas_reset_fusion(scmd->device->host,
+				SCSIIO_TIMEOUT_OCR);
+	}
+
+	return ret;
+}
+
+/**
+ * megasas_task_abort - Issues task abort request to firmware
+ *			(supported only for fusion adapters)
+ * @scmd:		SCSI command pointer
+ */
+static int megasas_task_abort(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	if (instance->adapter_type != MFI_SERIES)
+		ret = megasas_task_abort_fusion(scmd);
+	else {
+		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
+		ret = FAILED;
+	}
+
+	return ret;
+}
+
+/**
+ * megasas_reset_target:  Issues target reset request to firmware
+ *                        (supported only for fusion adapters)
+ * @scmd:                 SCSI command pointer
+ */
+static int megasas_reset_target(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	if (instance->adapter_type != MFI_SERIES)
+		ret = megasas_reset_target_fusion(scmd);
+	else {
+		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
+		ret = FAILED;
+	}
+
+	return ret;
+}
+
+/**
+ * megasas_bios_param - Returns disk geometry for a disk
+ * @sdev:		device handle
+ * @bdev:		block device
+ * @capacity:		drive capacity
+ * @geom:		geometry parameters
+ */
+static int
+megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+		 sector_t capacity, int geom[])
+{
+	int heads;
+	int sectors;
+	sector_t cylinders;
+	unsigned long tmp;
+
+	/* Default heads (64) & sectors (32) */
+	heads = 64;
+	sectors = 32;
+
+	tmp = heads * sectors;
+	cylinders = capacity;
+
+	sector_div(cylinders, tmp);
+
+	/*
+	 * Handle extended translation size for logical drives > 1Gb
+	 */
+
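+	/* 0x200000 sectors * 512 bytes/sector = 1 GiB */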
+	if (capacity >= 0x200000) {
+		heads = 255;
+		sectors = 63;
+		tmp = heads*sectors;
+		cylinders = capacity;
+		sector_div(cylinders, tmp);
+	}
+
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+
+	return 0;
+}
+
+static void megasas_aen_polling(struct work_struct *work);
+
+/**
+ * megasas_service_aen -	Processes an event notification
+ * @instance:			Adapter soft state
+ * @cmd:			AEN command completed by the ISR
+ *
+ * For AEN, driver sends a command down to FW that is held by the FW till an
+ * event occurs. When an event of interest occurs, FW completes the command
+ * that it was previously holding.
+ *
+ * This routine sends a SIGIO signal to processes that have registered with the
+ * driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * Don't signal app if it is just an aborted previously registered aen
+	 */
+	if ((!cmd->abort_aen) && (instance->unload == 0)) {
+		spin_lock_irqsave(&poll_aen_lock, flags);
+		megasas_poll_wait_aen = 1;
+		spin_unlock_irqrestore(&poll_aen_lock, flags);
+		wake_up(&megasas_poll_wait);
+		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+	} else
+		cmd->abort_aen = 0;
+
+	instance->aen_cmd = NULL;
+
+	megasas_return_cmd(instance, cmd);
+
+	if ((instance->unload == 0) &&
+		(instance->issuepend_done == 1)) {
+		struct megasas_aen_event *ev;
+
+		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+		if (!ev) {
+			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
+		} else {
+			ev->instance = instance;
+			instance->ev = ev;
+			INIT_DELAYED_WORK(&ev->hotplug_work,
+					  megasas_aen_polling);
+			schedule_delayed_work(&ev->hotplug_work, 0);
+		}
+	}
+}
+
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	int val = 0;
+	unsigned long flags;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	instance->fw_crash_buffer_offset = val;
+	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	u32 size;
+	unsigned long buff_addr;
+	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+	unsigned long chunk_left_bytes;
+	unsigned long src_addr;
+	unsigned long flags;
+	u32 buff_offset;
+
+	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	buff_offset = instance->fw_crash_buffer_offset;
+	if (!instance->crash_dump_buf &&
+		!((instance->fw_crash_state == AVAILABLE) ||
+		(instance->fw_crash_state == COPYING))) {
+		dev_err(&instance->pdev->dev,
+			"Firmware crash dump is not available\n");
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		return -EINVAL;
+	}
+
+	buff_addr = (unsigned long) buf;
+
+	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
+		dev_err(&instance->pdev->dev,
+			"Firmware crash dump offset is out of range\n");
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		return 0;
+	}
+
+	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
+	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+		(buff_offset % dmachunk);
+	memcpy(buf, (void *)src_addr, size);
+	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+	return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	int val = 0;
+	unsigned long flags;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	if (val <= AVAILABLE || val > COPY_ERROR) {
+		dev_err(&instance->pdev->dev, "application set an invalid "
+			"firmware crash state\n");
+		return -EINVAL;
+	}
+
+	instance->fw_crash_state = val;
+
+	if ((val == COPIED) || (val == COPY_ERROR)) {
+		spin_lock_irqsave(&instance->crashdump_lock, flags);
+		megasas_free_host_crash_buffer(instance);
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		if (val == COPY_ERROR)
+			dev_info(&instance->pdev->dev, "application failed to "
+				"copy Firmware crash dump\n");
+		else
+			dev_info(&instance->pdev->dev, "Firmware crash dump "
+				"copied successfully\n");
+	}
+	return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static ssize_t
+megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+	megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+	megasas_page_size_show, NULL);
+static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
+	megasas_ldio_outstanding_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+	&dev_attr_fw_crash_buffer_size,
+	&dev_attr_fw_crash_buffer,
+	&dev_attr_fw_crash_state,
+	&dev_attr_page_size,
+	&dev_attr_ldio_outstanding,
+	NULL,
+};
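+
+/*
+ * Usage sketch (illustrative; assumes the standard SCSI host sysfs layout,
+ * where shost_attrs appear under /sys/class/scsi_host/host<N>/): to copy a
+ * firmware crash dump, an application writes a byte offset to
+ * fw_crash_buffer, reads the corresponding chunk back from the same file,
+ * and finally writes COPIED (or COPY_ERROR) to fw_crash_state so that the
+ * driver frees the host crash buffer.
+ */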
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+	.module = THIS_MODULE,
+	.name = "Avago SAS based MegaRAID driver",
+	.proc_name = "megaraid_sas",
+	.slave_configure = megasas_slave_configure,
+	.slave_alloc = megasas_slave_alloc,
+	.slave_destroy = megasas_slave_destroy,
+	.queuecommand = megasas_queue_command,
+	.eh_target_reset_handler = megasas_reset_target,
+	.eh_abort_handler = megasas_task_abort,
+	.eh_host_reset_handler = megasas_reset_bus_host,
+	.eh_timed_out = megasas_reset_timer,
+	.shost_attrs = megaraid_host_attrs,
+	.bios_param = megasas_bios_param,
+	.use_clustering = ENABLE_CLUSTERING,
+	.change_queue_depth = scsi_change_queue_depth,
+	.no_write_same = 1,
+};
+
+/**
+ * megasas_complete_int_cmd -	Completes an internal command
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+			 struct megasas_cmd *cmd)
+{
+	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
+	wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort -	Completes aborting a command
+ * @instance:			Adapter soft state
+ * @cmd:			Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+		       struct megasas_cmd *cmd)
+{
+	if (cmd->sync_cmd) {
+		cmd->sync_cmd = 0;
+		cmd->cmd_status_drv = 0;
+		wake_up(&instance->abort_cmd_wait_q);
+	}
+}
+
+/**
+ * megasas_complete_cmd -	Completes a command
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be completed
+ * @alt_status:			If non-zero, use this value as status to
+ *				SCSI mid-layer instead of the value returned
+ *				by the FW. This should be used if caller wants
+ *				an alternate status (as in the case of aborted
+ *				commands)
+ */
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+		     u8 alt_status)
+{
+	int exception = 0;
+	struct megasas_header *hdr = &cmd->frame->hdr;
+	unsigned long flags;
+	struct fusion_context *fusion = instance->ctrl_context;
+	u32 opcode, status;
+
+	/* flag for the retry reset */
+	cmd->retry_for_fw_reset = 0;
+
+	if (cmd->scmd)
+		cmd->scmd->SCp.ptr = NULL;
+
+	switch (hdr->cmd) {
+	case MFI_CMD_INVALID:
+		/* Some older 1068 controller FW may keep a pending
+		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
+		   when booting the kdump kernel.  Ignore this command to
+		   prevent a kernel panic on shutdown of the kdump kernel. */
+		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
+		       "completed\n");
+		dev_warn(&instance->pdev->dev, "If you have a controller "
+		       "other than PERC5, please upgrade your firmware\n");
+		break;
+	case MFI_CMD_PD_SCSI_IO:
+	case MFI_CMD_LD_SCSI_IO:
+
+		/*
+		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+		 * issued either through an IO path or an IOCTL path. If it
+		 * was via IOCTL, we will send it to internal completion.
+		 */
+		if (cmd->sync_cmd) {
+			cmd->sync_cmd = 0;
+			megasas_complete_int_cmd(instance, cmd);
+			break;
+		}
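+		/* fall through */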
+
+	case MFI_CMD_LD_READ:
+	case MFI_CMD_LD_WRITE:
+
+		if (alt_status) {
+			cmd->scmd->result = alt_status << 16;
+			exception = 1;
+		}
+
+		if (exception) {
+
+			atomic_dec(&instance->fw_outstanding);
+
+			scsi_dma_unmap(cmd->scmd);
+			cmd->scmd->scsi_done(cmd->scmd);
+			megasas_return_cmd(instance, cmd);
+
+			break;
+		}
+
+		switch (hdr->cmd_status) {
+
+		case MFI_STAT_OK:
+			cmd->scmd->result = DID_OK << 16;
+			break;
+
+		case MFI_STAT_SCSI_IO_FAILED:
+		case MFI_STAT_LD_INIT_IN_PROGRESS:
+			cmd->scmd->result =
+			    (DID_ERROR << 16) | hdr->scsi_status;
+			break;
+
+		case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+				memset(cmd->scmd->sense_buffer, 0,
+				       SCSI_SENSE_BUFFERSIZE);
+				memcpy(cmd->scmd->sense_buffer, cmd->sense,
+				       hdr->sense_len);
+
+				cmd->scmd->result |= DRIVER_SENSE << 24;
+			}
+
+			break;
+
+		case MFI_STAT_LD_OFFLINE:
+		case MFI_STAT_DEVICE_NOT_FOUND:
+			cmd->scmd->result = DID_BAD_TARGET << 16;
+			break;
+
+		default:
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
+			       hdr->cmd_status);
+			cmd->scmd->result = DID_ERROR << 16;
+			break;
+		}
+
+		atomic_dec(&instance->fw_outstanding);
+
+		scsi_dma_unmap(cmd->scmd);
+		cmd->scmd->scsi_done(cmd->scmd);
+		megasas_return_cmd(instance, cmd);
+
+		break;
+
+	case MFI_CMD_SMP:
+	case MFI_CMD_STP:
+	case MFI_CMD_DCMD:
+		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+		/* Check for LD map update */
+		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
+			fusion->fast_path_io = 0;
+			spin_lock_irqsave(instance->host->host_lock, flags);
+			instance->map_update_cmd = NULL;
+			if (cmd->frame->hdr.cmd_status != 0) {
+				if (cmd->frame->hdr.cmd_status !=
+				    MFI_STAT_NOT_FOUND)
+					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
+					       cmd->frame->hdr.cmd_status);
+				else {
+					megasas_return_cmd(instance, cmd);
+					spin_unlock_irqrestore(
+						instance->host->host_lock,
+						flags);
+					break;
+				}
+			} else
+				instance->map_id++;
+			megasas_return_cmd(instance, cmd);
+
+			/*
+			 * Set fast path I/O to zero; MR_ValidateMapInfo()
+			 * will set the proper value. Meanwhile all I/O goes
+			 * through the LD I/O path.
+			 */
+			if (MR_ValidateMapInfo(instance))
+				fusion->fast_path_io = 1;
+			else
+				fusion->fast_path_io = 0;
+			megasas_sync_map_info(instance);
+			spin_unlock_irqrestore(instance->host->host_lock,
+					       flags);
+			break;
+		}
+		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+		    opcode == MR_DCMD_CTRL_EVENT_GET) {
+			spin_lock_irqsave(&poll_aen_lock, flags);
+			megasas_poll_wait_aen = 0;
+			spin_unlock_irqrestore(&poll_aen_lock, flags);
+		}
+
+		/* FW has an updated PD sequence */
+		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+			(cmd->frame->dcmd.mbox.b[0] == 1)) {
+
+			spin_lock_irqsave(instance->host->host_lock, flags);
+			status = cmd->frame->hdr.cmd_status;
+			instance->jbod_seq_cmd = NULL;
+			megasas_return_cmd(instance, cmd);
+
+			if (status == MFI_STAT_OK) {
+				instance->pd_seq_map_id++;
+				/* Re-register a pd sync seq num cmd */
+				if (megasas_sync_pd_seq_num(instance, true))
+					instance->use_seqnum_jbod_fp = false;
+			} else
+				instance->use_seqnum_jbod_fp = false;
+
+			spin_unlock_irqrestore(instance->host->host_lock, flags);
+			break;
+		}
+
+		/*
+		 * See if got an event notification
+		 */
+		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+			megasas_service_aen(instance, cmd);
+		else
+			megasas_complete_int_cmd(instance, cmd);
+
+		break;
+
+	case MFI_CMD_ABORT:
+		/*
+		 * Cmd issued to abort another cmd returned
+		 */
+		megasas_complete_abort(instance, cmd);
+		break;
+
+	default:
+		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
+		       hdr->cmd);
+		break;
+	}
+}
+
+/**
+ * megasas_issue_pending_cmds_again -	issue all pending cmds
+ *					in FW again because of the fw reset
+ * @instance:				Adapter soft state
+ */
+static inline void
+megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	struct list_head clist_local;
+	union megasas_evt_class_locale class_locale;
+	unsigned long flags;
+	u32 seq_num;
+
+	INIT_LIST_HEAD(&clist_local);
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	while (!list_empty(&clist_local)) {
+		cmd = list_entry((&clist_local)->next,
+					struct megasas_cmd, list);
+		list_del_init(&cmd->list);
+
+		if (cmd->sync_cmd || cmd->scmd) {
+			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
+				"detected to be pending while HBA reset\n",
+					cmd, cmd->scmd, cmd->sync_cmd);
+
+			cmd->retry_for_fw_reset++;
+
+			if (cmd->retry_for_fw_reset == 3) {
+				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
+					"was tried multiple times during reset."
+					"Shutting down the HBA\n",
+					cmd, cmd->scmd, cmd->sync_cmd);
+				instance->instancet->disable_intr(instance);
+				atomic_set(&instance->fw_reset_no_pci_access, 1);
+				megaraid_sas_kill_hba(instance);
+				return;
+			}
+		}
+
+		if (cmd->sync_cmd == 1) {
+			if (cmd->scmd) {
+				dev_notice(&instance->pdev->dev, "unexpected"
+					"cmd attached to internal command!\n");
+			}
+			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
+						"on the internal reset queue,"
+						"issue it again.\n", cmd);
+			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
+			instance->instancet->fire_cmd(instance,
+							cmd->frame_phys_addr,
+							0, instance->reg_set);
+		} else if (cmd->scmd) {
+			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
+			"detected on the internal queue, issue again.\n",
+			cmd, cmd->scmd->cmnd[0]);
+
+			atomic_inc(&instance->fw_outstanding);
+			instance->instancet->fire_cmd(instance,
+					cmd->frame_phys_addr,
+					cmd->frame_count-1, instance->reg_set);
+		} else {
+			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
+				"internal reset defer list while re-issue!!\n",
+				cmd);
+		}
+	}
+
+	if (instance->aen_cmd) {
+		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
+		megasas_return_cmd(instance, instance->aen_cmd);
+
+		instance->aen_cmd = NULL;
+	}
+
+	/*
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
+	seq_num = instance->last_seq_num;
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	megasas_register_aen(instance, seq_num, class_locale.word);
+}
+
+/**
+ * megasas_internal_reset_defer_cmds -	Move internal reset pending commands
+ *					to a deferred queue
+ * @instance:				Adapter soft state
+ *
+ * We move the commands pending at internal reset time to a pending queue.
+ * This queue is flushed after the internal reset sequence completes
+ * successfully. If the internal reset does not complete in time, the kernel
+ * reset handler flushes these commands instead.
+ */
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	int i;
+	u16 max_cmd = instance->max_fw_cmds;
+	u32 defer_index;
+	unsigned long flags;
+
+	defer_index = 0;
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		if (cmd->sync_cmd == 1 || cmd->scmd) {
+			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
+					"on the defer queue as internal\n",
+				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
+
+			if (!list_empty(&cmd->list)) {
+				dev_notice(&instance->pdev->dev, "ERROR while"
+					" moving this cmd:%p, %d %p, it was"
+					"discovered on some list?\n",
+					cmd, cmd->sync_cmd, cmd->scmd);
+
+				list_del_init(&cmd->list);
+			}
+			defer_index++;
+			list_add_tail(&cmd->list,
+				&instance->internal_reset_pending_q);
+		}
+	}
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
+}
+
+
+static void
+process_fw_state_change_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, work_init);
+	u32 wait;
+	unsigned long flags;
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
+				atomic_read(&instance->adprecovery));
+		return;
+	}
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
+		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
+					"state, restarting it...\n");
+
+		instance->instancet->disable_intr(instance);
+		atomic_set(&instance->fw_outstanding, 0);
+
+		atomic_set(&instance->fw_reset_no_pci_access, 1);
+		instance->instancet->adp_reset(instance, instance->reg_set);
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
+					"initiating next stage...\n");
+
+		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
+					"state 2 starting...\n");
+
+		/* wait for about 30 seconds before starting the second init */
+		for (wait = 0; wait < 30; wait++)
+			msleep(1000);
+
+		if (megasas_transition_to_ready(instance, 1)) {
+			dev_notice(&instance->pdev->dev, "adapter not ready\n");
+
+			atomic_set(&instance->fw_reset_no_pci_access, 1);
+			megaraid_sas_kill_hba(instance);
+			return;
+		}
+
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
+			) {
+			*instance->consumer = *instance->producer;
+		} else {
+			*instance->consumer = 0;
+			*instance->producer = 0;
+		}
+
+		megasas_issue_init_mfi(instance);
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		instance->instancet->enable_intr(instance);
+
+		megasas_issue_pending_cmds_again(instance);
+		instance->issuepend_done = 1;
+	}
+}
+
+/**
+ * megasas_deplete_reply_queue -	Processes all completed commands
+ * @instance:				Adapter soft state
+ * @alt_status:				Alternate status to be returned to
+ *					SCSI mid-layer instead of the status
+ *					returned by the FW
+ * Note: this must be called with hba lock held
+ */
+static int
+megasas_deplete_reply_queue(struct megasas_instance *instance,
+					u8 alt_status)
+{
+	u32 mfiStatus;
+	u32 fw_state;
+
+	if ((mfiStatus = instance->instancet->check_reset(instance,
+					instance->reg_set)) == 1) {
+		return IRQ_HANDLED;
+	}
+
+	if ((mfiStatus = instance->instancet->clear_intr(
+						instance->reg_set)
+						) == 0) {
+		/* Hardware may not set outbound_intr_status in MSI-X mode */
+		if (!instance->msix_vectors)
+			return IRQ_NONE;
+	}
+
+	instance->mfiStatus = mfiStatus;
+
+	if (mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE) {
+		fw_state = instance->instancet->read_fw_status_reg(
+				instance->reg_set) & MFI_STATE_MASK;
+
+		if (fw_state != MFI_STATE_FAULT) {
+			dev_notice(&instance->pdev->dev, "fw state:%x\n",
+						fw_state);
+		}
+
+		if ((fw_state == MFI_STATE_FAULT) &&
+				(instance->disableOnlineCtrlReset == 0)) {
+			dev_notice(&instance->pdev->dev, "wait adp restart\n");
+
+			if ((instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_SAS1064R) ||
+				(instance->pdev->device ==
+					PCI_DEVICE_ID_DELL_PERC5) ||
+				(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+
+				*instance->consumer =
+					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+			}
+
+			instance->instancet->disable_intr(instance);
+			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
+			instance->issuepend_done = 0;
+
+			atomic_set(&instance->fw_outstanding, 0);
+			megasas_internal_reset_defer_cmds(instance);
+
+			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
+					fw_state, atomic_read(&instance->adprecovery));
+
+			schedule_work(&instance->work_init);
+			return IRQ_HANDLED;
+
+		} else {
+			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
+				fw_state, instance->disableOnlineCtrlReset);
+		}
+	}
+
+	tasklet_schedule(&instance->isr_tasklet);
+	return IRQ_HANDLED;
+}
+
+/**
+ * megasas_isr -	ISR entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp)
+{
+	struct megasas_irq_context *irq_context = devp;
+	struct megasas_instance *instance = irq_context->instance;
+	unsigned long flags;
+	irqreturn_t rc;
+
+	if (atomic_read(&instance->fw_reset_no_pci_access))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	rc = megasas_deplete_reply_queue(instance, DID_OK);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	return rc;
+}
+
+/**
+ * megasas_transition_to_ready -	Move the FW to READY state
+ * @instance:				Adapter soft state
+ *
+ * During initialization, the FW can be in any one of several possible
+ * states. If the FW is in an operational or waiting-for-handshake state,
+ * the driver must take steps to bring it to the ready state. Otherwise,
+ * it simply waits for the FW to reach the ready state.
+ */
+int
+megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
+{
+	int i;
+	u8 max_wait;
+	u32 fw_state;
+	u32 cur_state;
+	u32 abs_state, curr_abs_state;
+
+	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+	fw_state = abs_state & MFI_STATE_MASK;
+
+	if (fw_state != MFI_STATE_READY)
+		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
+		       " state\n");
+
+	while (fw_state != MFI_STATE_READY) {
+
+		switch (fw_state) {
+
+		case MFI_STATE_FAULT:
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
+			if (ocr) {
+				max_wait = MEGASAS_RESET_WAIT_TIME;
+				cur_state = MFI_STATE_FAULT;
+				break;
+			} else
+				return -ENODEV;
+
+		case MFI_STATE_WAIT_HANDSHAKE:
+			/*
+			 * Set the CLR bit in inbound doorbell
+			 */
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+				(instance->adapter_type != MFI_SERIES))
+				writel(
+				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+				  &instance->reg_set->doorbell);
+			else
+				writel(
+				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_WAIT_HANDSHAKE;
+			break;
+
+		case MFI_STATE_BOOT_MESSAGE_PENDING:
+			if ((instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+				(instance->adapter_type != MFI_SERIES))
+				writel(MFI_INIT_HOTPLUG,
+				       &instance->reg_set->doorbell);
+			else
+				writel(MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+			break;
+
+		case MFI_STATE_OPERATIONAL:
+			/*
+			 * Bring it to READY state; assuming max wait 10 secs
+			 */
+			instance->instancet->disable_intr(instance);
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
+				(instance->adapter_type != MFI_SERIES)) {
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->doorbell);
+
+				if (instance->adapter_type != MFI_SERIES) {
+					for (i = 0; i < (10 * 1000); i += 20) {
+						if (readl(&instance->reg_set->doorbell) & 1)
+							msleep(20);
+						else
+							break;
+					}
+				}
+			} else
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->inbound_doorbell);
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_OPERATIONAL;
+			break;
+
+		case MFI_STATE_UNDEFINED:
+			/*
+			 * This state should not last for more than 2 seconds
+			 */
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_UNDEFINED;
+			break;
+
+		case MFI_STATE_BB_INIT:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_BB_INIT;
+			break;
+
+		case MFI_STATE_FW_INIT:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FW_INIT;
+			break;
+
+		case MFI_STATE_FW_INIT_2:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FW_INIT_2;
+			break;
+
+		case MFI_STATE_DEVICE_SCAN:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_DEVICE_SCAN;
+			break;
+
+		case MFI_STATE_FLUSH_CACHE:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FLUSH_CACHE;
+			break;
+
+		default:
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
+			       fw_state);
+			return -ENODEV;
+		}
+
+		/*
+		 * The cur_state should not last for more than max_wait secs;
+		 * the loop below polls every 20 ms, i.e. 50 times per second,
+		 * for max_wait * 50 iterations.
+		 */
+		for (i = 0; i < max_wait * 50; i++) {
+			curr_abs_state = instance->instancet->
+				read_fw_status_reg(instance->reg_set);
+
+			if (abs_state == curr_abs_state)
+				msleep(20);
+			else
+				break;
+		}
+
+		/*
+		 * Return error if fw_state hasn't changed after max_wait
+		 */
+		if (curr_abs_state == abs_state) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
+			       "in %d secs\n", fw_state, max_wait);
+			return -ENODEV;
+		}
+
+		abs_state = curr_abs_state;
+		fw_state = curr_abs_state & MFI_STATE_MASK;
+	}
+	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
+
+	return 0;
+}
+
+/**
+ * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
+ * @instance:				Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+	int i;
+	u16 max_cmd = instance->max_mfi_cmds;
+	struct megasas_cmd *cmd;
+
+	if (!instance->frame_dma_pool)
+		return;
+
+	/*
+	 * Return all frames to pool
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		if (cmd->frame)
+			dma_pool_free(instance->frame_dma_pool, cmd->frame,
+				      cmd->frame_phys_addr);
+
+		if (cmd->sense)
+			dma_pool_free(instance->sense_dma_pool, cmd->sense,
+				      cmd->sense_phys_addr);
+	}
+
+	/*
+	 * Now destroy the pool itself
+	 */
+	dma_pool_destroy(instance->frame_dma_pool);
+	dma_pool_destroy(instance->sense_dma_pool);
+
+	instance->frame_dma_pool = NULL;
+	instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool -	Creates DMA pool for cmd frames
+ * @instance:			Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling the MFI frame and the SG list that immediately follows the frame.
+ * This function creates those DMA memory buffers for each command packet by
+ * using the DMA pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+	int i;
+	u16 max_cmd;
+	u32 sge_sz;
+	u32 frame_count;
+	struct megasas_cmd *cmd;
+
+	max_cmd = instance->max_mfi_cmds;
+
+	/*
+	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
+	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
+	 */
+	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+	    sizeof(struct megasas_sge32);
+
+	if (instance->flag_ieee)
+		sge_sz = sizeof(struct megasas_sge_skinny);
+
+	/*
+	 * For MFI controllers:
+	 * max_num_sge = 60
+	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
+	 * Total 960 bytes (15 MFI frames of 64 bytes)
+	 *
+	 * Fusion adapters require only 3 extra frames:
+	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
+	 * Total 192 bytes (3 MFI frames of 64 bytes)
+	 */
+	frame_count = (instance->adapter_type == MFI_SERIES) ?
+			(15 + 1) : (3 + 1);
+	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
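+	/*
+	 * E.g. assuming MEGAMFI_FRAME_SIZE is 64 (per the comment above):
+	 * MFI series commands get (15 + 1) * 64 = 1024 bytes each, Fusion
+	 * commands (3 + 1) * 64 = 256 bytes each.
+	 */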
+	/*
+	 * Use DMA pool facility provided by PCI layer
+	 */
+	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
+					&instance->pdev->dev,
+					instance->mfi_frame_size, 256, 0);
+
+	if (!instance->frame_dma_pool) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
+		return -ENOMEM;
+	}
+
+	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
+						   &instance->pdev->dev, 128,
+						   4, 0);
+
+	if (!instance->sense_dma_pool) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
+
+		dma_pool_destroy(instance->frame_dma_pool);
+		instance->frame_dma_pool = NULL;
+
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate and attach a frame to each of the commands in cmd_list.
+	 * By using cmd->index as the context instead of &cmd, we can always
+	 * use a 32-bit context regardless of the architecture.
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
+					    GFP_KERNEL, &cmd->frame_phys_addr);
+
+		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
+					    GFP_KERNEL, &cmd->sense_phys_addr);
+
+		/*
+		 * megasas_teardown_frame_pool() takes care of freeing
+		 * whatever has been allocated
+		 */
+		if (!cmd->frame || !cmd->sense) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
+			megasas_teardown_frame_pool(instance);
+			return -ENOMEM;
+		}
+
+		memset(cmd->frame, 0, instance->mfi_frame_size);
+		cmd->frame->io.context = cpu_to_le32(cmd->index);
+		cmd->frame->io.pad_0 = 0;
+		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
+			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+	}
+
+	return 0;
+}
+
+/**
+ * megasas_free_cmds -	Free all the cmds in the free cmd pool
+ * @instance:		Adapter soft state
+ */
+void megasas_free_cmds(struct megasas_instance *instance)
+{
+	int i;
+
+	/* First free the MFI frame pool */
+	megasas_teardown_frame_pool(instance);
+
+	/* Free all the commands in the cmd_list */
+	for (i = 0; i < instance->max_mfi_cmds; i++)
+
+		kfree(instance->cmd_list[i]);
+
+	/* Free the cmd_list buffer itself */
+	kfree(instance->cmd_list);
+	instance->cmd_list = NULL;
+
+	INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds -	Allocates the command packets
+ * @instance:		Adapter soft state
+ *
+ * Each command that is issued to the FW, whether an IO command from the OS
+ * or an internal command like an IOCTL, is wrapped in a local data structure
+ * called megasas_cmd. The frame embedded in this megasas_cmd is what is
+ * actually issued to the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the minimum. We always use 32 bit integers for the context. In
+ * this driver, the 32 bit values are the indices into the array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+	int i;
+	int j;
+	u16 max_cmd;
+	struct megasas_cmd *cmd;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+	max_cmd = instance->max_mfi_cmds;
+
+	/*
+	 * instance->cmd_list is an array of struct megasas_cmd pointers.
+	 * Allocate the dynamic array first and then allocate individual
+	 * commands.
+	 */
+	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
+				     GFP_KERNEL);
+
+	if (!instance->cmd_list) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < max_cmd; i++) {
+		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+						GFP_KERNEL);
+
+		if (!instance->cmd_list[i]) {
+
+			for (j = 0; j < i; j++)
+				kfree(instance->cmd_list[j]);
+
+			kfree(instance->cmd_list);
+			instance->cmd_list = NULL;
+
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		memset(cmd, 0, sizeof(struct megasas_cmd));
+		cmd->index = i;
+		cmd->scmd = NULL;
+		cmd->instance = instance;
+
+		list_add_tail(&cmd->list, &instance->cmd_pool);
+	}
+
+	/*
+	 * Create a frame pool and assign one frame to each cmd
+	 */
+	if (megasas_create_frame_pool(instance)) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
+		megasas_free_cmds(instance);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
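+
+/*
+ * Illustrative sketch (not driver code): on completion, the 32-bit context
+ * carried in the frame is simply an index back into cmd_list:
+ *
+ *	u32 context = le32_to_cpu(frame->io.context);
+ *	struct megasas_cmd *cmd = instance->cmd_list[context];
+ */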
+
+/*
+ * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state
+ * @instance:			Adapter soft state
+ *
+ * Returns KILL_ADAPTER for MFI adapters. For Fusion adapters, returns
+ * IGNORE_TIMEOUT if a driver load/unload is in progress or the FW is already
+ * under OCR, and INITIATE_OCR otherwise.
+ */
+inline int
+dcmd_timeout_ocr_possible(struct megasas_instance *instance)
+{
+	if (instance->adapter_type == MFI_SERIES)
+		return KILL_ADAPTER;
+	else if (instance->unload ||
+			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+				 &instance->reset_flags))
+		return IGNORE_TIMEOUT;
+	else
+		return INITIATE_OCR;
+}
+
+static void
+megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
+{
+	int ret;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+	u16 device_id = 0;
+
+	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
+		return;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.s[0] = cpu_to_le16(device_id);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	switch (ret) {
+	case DCMD_SUCCESS:
+		mr_device_priv_data = sdev->hostdata;
+		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
+		mr_device_priv_data->interface_type =
+				instance->pd_info->state.ddf.pdType.intf;
+		break;
+
+	case DCMD_TIMEOUT:
+
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			megasas_reset_fusion(instance->host,
+				MFI_IO_TIMEOUT_OCR);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+
+		break;
+	}
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+}
+
+/*
+ * megasas_get_pd_list -	Returns FW's pd_list structure
+ * @instance:			Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure.  This information is mainly used to find out the
+ * system PDs (physical drives exposed to the host) supported by the FW.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+	int ret = 0, pd_index = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_PD_LIST *ci;
+	struct MR_PD_ADDRESS *pd_addr;
+	dma_addr_t ci_h = 0;
+
+	if (instance->pd_list_not_supported) {
+		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+		"not supported by firmware\n");
+		return ret;
+	}
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+	if (!ci) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+	dcmd->mbox.b[1] = 0;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	switch (ret) {
+	case DCMD_FAILED:
+		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+			"failed/not supported by firmware\n");
+
+		if (instance->adapter_type != MFI_SERIES)
+			megaraid_sas_kill_hba(instance);
+		else
+			instance->pd_list_not_supported = 1;
+		break;
+	case DCMD_TIMEOUT:
+
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			/*
+			 * DCMD timed out from the AEN path. The AEN path
+			 * already holds reset_mutex to avoid PCI access
+			 * while OCR is in progress.
+			 */
+			mutex_unlock(&instance->reset_mutex);
+			megasas_reset_fusion(instance->host,
+						MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
+				__func__, __LINE__);
+			break;
+		}
+
+		break;
+
+	case DCMD_SUCCESS:
+		pd_addr = ci->addr;
+
+		if (le32_to_cpu(ci->count) >
+			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))
+			break;
+
+		memset(instance->local_pd_list, 0,
+				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
+					le16_to_cpu(pd_addr->deviceId);
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
+					pd_addr->scsiDevType;
+			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
+					MR_PD_STATE_SYSTEM;
+			pd_addr++;
+		}
+
+		memcpy(instance->pd_list, instance->local_pd_list,
+			sizeof(instance->pd_list));
+		break;
+
+	}
+
+	pci_free_consistent(instance->pdev,
+				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+				ci, ci_h);
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/*
+ * megasas_get_ld_list -	Returns FW's ld_list structure
+ * @instance:			Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's logical drive (LD)
+ * list structure.  This information is mainly used to find out which
+ * logical drives are exported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+	int ret = 0, ld_index = 0, ids = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_LIST *ci;
+	dma_addr_t ci_h = 0;
+	u32 ld_count;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				sizeof(struct MR_LD_LIST),
+				&ci_h);
+
+	if (!ci) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	if (instance->supportmax256vd)
+		dcmd->mbox.b[0] = 1;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
+	dcmd->pad_0  = 0;
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	ld_count = le32_to_cpu(ci->ldCount);
+
+	switch (ret) {
+	case DCMD_FAILED:
+		megaraid_sas_kill_hba(instance);
+		break;
+	case DCMD_TIMEOUT:
+
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			/*
+			 * DCMD timed out from the AEN path. The AEN path
+			 * already holds reset_mutex to avoid PCI access
+			 * while OCR is in progress.
+			 */
+			mutex_unlock(&instance->reset_mutex);
+			megasas_reset_fusion(instance->host,
+						MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+
+		break;
+
+	case DCMD_SUCCESS:
+		if (ld_count > instance->fw_supported_vd_count)
+			break;
+
+		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
+
+		for (ld_index = 0; ld_index < ld_count; ld_index++) {
+			if (ci->ldList[ld_index].state != 0) {
+				ids = ci->ldList[ld_index].ref.targetId;
+				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
+			}
+		}
+
+		break;
+	}
+
+	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/**
+ * megasas_ld_list_query -	Returns FW's LD target ID list
+ * @instance:			Adapter soft state
+ * @query_type:			Type of LD query to issue
+ *
+ * Issues an internal command (DCMD) to get the FW's logical drive target
+ * ID list.  If the firmware does not support this DCMD, falls back to
+ * megasas_get_ld_list().
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+	int ret = 0, ld_index = 0, ids = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_TARGETID_LIST *ci;
+	dma_addr_t ci_h = 0;
+	u32 tgtid_count;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_warn(&instance->pdev->dev,
+		         "megasas_ld_list_query: Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+	if (!ci) {
+		dev_warn(&instance->pdev->dev,
+		         "Failed to alloc mem for ld_list_query\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.b[0] = query_type;
+	if (instance->supportmax256vd)
+		dcmd->mbox.b[2] = 1;
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+	dcmd->pad_0  = 0;
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	switch (ret) {
+	case DCMD_FAILED:
+		dev_info(&instance->pdev->dev,
+			"DCMD not supported by firmware - %s %d\n",
+				__func__, __LINE__);
+		ret = megasas_get_ld_list(instance);
+		break;
+	case DCMD_TIMEOUT:
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			/*
+			 * DCMD timed out from the AEN path. The AEN path
+			 * already holds reset_mutex to avoid PCI access
+			 * while OCR is in progress.
+			 */
+			mutex_unlock(&instance->reset_mutex);
+			megasas_reset_fusion(instance->host,
+						MFI_IO_TIMEOUT_OCR);
+			mutex_lock(&instance->reset_mutex);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+
+		break;
+	case DCMD_SUCCESS:
+		tgtid_count = le32_to_cpu(ci->count);
+
+		if (tgtid_count > instance->fw_supported_vd_count)
+			break;
+
+		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+			ids = ci->targetId[ld_index];
+			instance->ld_ids[ids] = ci->targetId[ld_index];
+		}
+
+		break;
+	}
+
+	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+		    ci, ci_h);
+
+	if (ret != DCMD_TIMEOUT)
+		megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/*
+ * megasas_update_ext_vd_details -	Update details w.r.t. Extended VD
+ * @instance:				Adapter soft state
+ */
+static void megasas_update_ext_vd_details(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+	u32 ventura_map_sz = 0;
+
+	fusion = instance->ctrl_context;
+	/* For MFI based controllers return dummy success */
+	if (!fusion)
+		return;
+
+	instance->supportmax256vd =
+		instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+	/* Below is additional check to address future FW enhancement */
+	if (instance->ctrl_info->max_lds > 64)
+		instance->supportmax256vd = 1;
+
+	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+					* MEGASAS_MAX_DEV_PER_CHANNEL;
+	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+					* MEGASAS_MAX_DEV_PER_CHANNEL;
+	if (instance->supportmax256vd) {
+		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+	} else {
+		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+	}
+
+	dev_info(&instance->pdev->dev,
+		"firmware type\t: %s\n",
+		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
+		"Legacy(64 VD) firmware");
+
+	if (instance->max_raid_mapsize) {
+		ventura_map_sz = instance->max_raid_mapsize *
+						MR_MIN_MAP_SIZE; /* 64k */
+		fusion->current_map_sz = ventura_map_sz;
+		fusion->max_map_sz = ventura_map_sz;
+	} else {
+		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
+					(sizeof(struct MR_LD_SPAN_MAP) *
+					(instance->fw_supported_vd_count - 1));
+		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
+
+		fusion->max_map_sz =
+			max(fusion->old_map_sz, fusion->new_map_sz);
+
+		if (instance->supportmax256vd)
+			fusion->current_map_sz = fusion->new_map_sz;
+		else
+			fusion->current_map_sz = fusion->old_map_sz;
+	}
+	/* irrespective of FW raid maps, driver raid map is constant */
+	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
+}
+
+/**
+ * megasas_get_ctrl_info -	Returns FW's controller structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+int
+megasas_get_ctrl_info(struct megasas_instance *instance)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct megasas_ctrl_info *ci;
+	struct megasas_ctrl_info *ctrl_info;
+	dma_addr_t ci_h = 0;
+
+	ctrl_info = instance->ctrl_info;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				  sizeof(struct megasas_ctrl_info), &ci_h);
+
+	if (!ci) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+	dcmd->mbox.b[0] = 1;
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	switch (ret) {
+	case DCMD_SUCCESS:
+		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+		/* Save required controller information in
+		 * CPU endianness format.
+		 */
+		le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+		le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+		le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
+
+		/* Update the latest Ext VD info.
+		 * From Init path, store current firmware details.
+		 * From OCR path, detect any firmware properties changes.
+		 * in case of Firmware upgrade without system reboot.
+		 */
+		megasas_update_ext_vd_details(instance);
+		instance->use_seqnum_jbod_fp =
+			ctrl_info->adapterOperations3.useSeqNumJbodFP;
+		instance->support_morethan256jbod =
+			ctrl_info->adapter_operations4.support_pd_map_target_id;
+
+		/* Check whether controller is iMR or MR */
+		instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+		dev_info(&instance->pdev->dev,
+			"controller type\t: %s(%dMB)\n",
+			instance->is_imr ? "iMR" : "MR",
+			le16_to_cpu(ctrl_info->memory_size));
+
+		instance->disableOnlineCtrlReset =
+			ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+		instance->secure_jbod_support =
+			ctrl_info->adapterOperations3.supportSecurityonJBOD;
+		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
+			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
+		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
+			instance->secure_jbod_support ? "Yes" : "No");
+		break;
+
+	case DCMD_TIMEOUT:
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			megasas_reset_fusion(instance->host,
+				MFI_IO_TIMEOUT_OCR);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+		break;
+
+	case DCMD_FAILED:
+		megaraid_sas_kill_hba(instance);
+		break;
+
+	}
+
+	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+			    ci, ci_h);
+
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/*
+ * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
+ *					to firmware
+ * @instance:				Adapter soft state
+ * @crash_buf_state:			Tell FW to turn the crash dump feature
+ *					ON (MR_CRASH_BUF_TURN_ON = 1) or
+ *					OFF (MR_CRASH_BUF_TURN_OFF = 0)
+ *
+ * Returns 0 on success, non-zero on failure.
+ *
+ * Issues an internal command (DCMD) to set parameters for the crash dump
+ * feature. The driver sends the address of the crash dump DMA buffer and
+ * sets the mbox to tell the FW that the driver supports crash dump. This
+ * DCMD is sent only if the crash dump feature is supported by the FW.
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+	u8 crash_buf_state)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+	dcmd->mbox.b[0] = crash_buf_state;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	if (ret == DCMD_TIMEOUT) {
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			megasas_reset_fusion(instance->host,
+					MFI_IO_TIMEOUT_OCR);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+	} else
+		megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/**
+ * megasas_issue_init_mfi -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * Issues the INIT MFI cmd
+ */
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
+{
+	__le32 context;
+	struct megasas_cmd *cmd;
+	struct megasas_init_frame *init_frame;
+	struct megasas_init_queue_info *initq_info;
+	dma_addr_t init_frame_h;
+	dma_addr_t initq_info_h;
+
+	/*
+	 * Prepare an init frame. Note that the init frame points to a queue
+	 * info structure. Each frame has its SGL allocated after the first
+	 * 64 bytes. For this frame - since we don't need any SGL - we use
+	 * the SGL's space as the queue info structure.
+	 *
+	 * We will not get a NULL command below. We just created the pool.
+	 */
+	cmd = megasas_get_cmd(instance);
+
+	init_frame = (struct megasas_init_frame *)cmd->frame;
+	initq_info = (struct megasas_init_queue_info *)
+		((unsigned long)init_frame + 64);
+
+	init_frame_h = cmd->frame_phys_addr;
+	initq_info_h = init_frame_h + 64;
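+	/*
+	 * Resulting DMA layout (offsets relative to cmd->frame_phys_addr):
+	 *   bytes  0..63  - megasas_init_frame
+	 *   bytes 64..    - megasas_init_queue_info, in the unused SGL space
+	 */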
+
+	context = init_frame->context;
+	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+	init_frame->context = context;
+
+	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
+
+	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
+
+	init_frame->cmd = MFI_CMD_INIT;
+	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
+	init_frame->queue_info_new_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(initq_info_h));
+	init_frame->queue_info_new_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(initq_info_h));
+
+	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
+
+	/*
+	 * disable the intr before firing the init frame to FW
+	 */
+	instance->instancet->disable_intr(instance);
+
+	/*
+	 * Issue the init frame in polled mode
+	 */
+
+	if (megasas_issue_polled(instance, cmd)) {
+		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
+		megasas_return_cmd(instance, cmd);
+		goto fail_fw_init;
+	}
+
+	megasas_return_cmd(instance, cmd);
+
+	return 0;
+
+fail_fw_init:
+	return -EINVAL;
+}
+
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *reg_set;
+	u32 context_sz;
+	u32 reply_q_sz;
+
+	reg_set = instance->reg_set;
+
+	/*
+	 * Get various operational parameters from status register
+	 */
+	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+	/*
+	 * Reduce the max supported cmds by 1. This is to ensure that the
+	 * reply_q_sz (1 more than the max cmd that driver may send)
+	 * does not exceed max cmds that the FW can support
+	 */
+	instance->max_fw_cmds = instance->max_fw_cmds-1;
+	instance->max_mfi_cmds = instance->max_fw_cmds;
+	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+					0x10;
+	/*
+	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
+	 * are reserved for IOCTL + driver's internal DCMDs.
+	 */
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		instance->max_scsi_cmds = (instance->max_fw_cmds -
+			MEGASAS_SKINNY_INT_CMDS);
+		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+	} else {
+		instance->max_scsi_cmds = (instance->max_fw_cmds -
+			MEGASAS_INT_CMDS);
+		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
+	}
+
+	instance->cur_can_queue = instance->max_scsi_cmds;
+	/*
+	 * Create a pool of commands
+	 */
+	if (megasas_alloc_cmds(instance))
+		goto fail_alloc_cmds;
+
+	/*
+	 * Allocate memory for reply queue. Length of reply queue should
+	 * be _one_ more than the maximum commands handled by the firmware.
+	 *
+	 * Note: When FW completes commands, it places corresponding context
+	 * values in this circular reply queue. This circular queue is a fairly
+	 * typical producer-consumer queue. FW is the producer (of completed
+	 * commands) and the driver is the consumer.
+	 */
+	context_sz = sizeof(u32);
+	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
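+	/*
+	 * E.g. with max_fw_cmds = 1000 this is 4 * (1000 + 1) = 4004 bytes:
+	 * one 32-bit context slot per outstanding command, plus the extra
+	 * slot a circular producer-consumer queue needs to tell a full
+	 * queue from an empty one.
+	 */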
+
+	instance->reply_queue = pci_alloc_consistent(instance->pdev,
+						     reply_q_sz,
+						     &instance->reply_queue_h);
+
+	if (!instance->reply_queue) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
+		goto fail_reply_queue;
+	}
+
+	if (megasas_issue_init_mfi(instance))
+		goto fail_fw_init;
+
+	if (megasas_get_ctrl_info(instance)) {
+		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
+			"Fail from %s %d\n", instance->unique_id,
+			__func__, __LINE__);
+		goto fail_fw_init;
+	}
+
+	instance->fw_support_ieee = 0;
+	instance->fw_support_ieee =
+		(instance->instancet->read_fw_status_reg(reg_set) &
+		0x04000000);
+
+	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
+			instance->fw_support_ieee);
+
+	if (instance->fw_support_ieee)
+		instance->flag_ieee = 1;
+
+	return 0;
+
+fail_fw_init:
+
+	pci_free_consistent(instance->pdev, reply_q_sz,
+			    instance->reply_queue, instance->reply_queue_h);
+fail_reply_queue:
+	megasas_free_cmds(instance);
+
+fail_alloc_cmds:
+	return 1;
+}
+
+/**
+ * megasas_setup_irqs_ioapic -		register legacy interrupts.
+ * @instance:				Adapter soft state
+ *
+ * Do not enable interrupts; only set up ISRs.
+ *
+ * Return 0 on success.
+ */
+static int
+megasas_setup_irqs_ioapic(struct megasas_instance *instance)
+{
+	struct pci_dev *pdev;
+
+	pdev = instance->pdev;
+	instance->irq_context[0].instance = instance;
+	instance->irq_context[0].MSIxIndex = 0;
+	if (request_irq(pci_irq_vector(pdev, 0),
+			instance->instancet->service_isr, IRQF_SHARED,
+			"megasas", &instance->irq_context[0])) {
+		dev_err(&instance->pdev->dev,
+				"Failed to register IRQ from %s %d\n",
+				__func__, __LINE__);
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * megasas_setup_irqs_msix -		register MSI-X interrupts.
+ * @instance:				Adapter soft state
+ * @is_probe:				Driver probe check
+ *
+ * Do not enable interrupts; only set up ISRs.
+ *
+ * Return 0 on success.
+ */
+static int
+megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
+{
+	int i, j;
+	struct pci_dev *pdev;
+
+	pdev = instance->pdev;
+
+	/* Try MSI-x */
+	for (i = 0; i < instance->msix_vectors; i++) {
+		instance->irq_context[i].instance = instance;
+		instance->irq_context[i].MSIxIndex = i;
+		if (request_irq(pci_irq_vector(pdev, i),
+			instance->instancet->service_isr, 0, "megasas",
+			&instance->irq_context[i])) {
+			dev_err(&instance->pdev->dev,
+				"Failed to register IRQ for vector %d.\n", i);
+			for (j = 0; j < i; j++)
+				free_irq(pci_irq_vector(pdev, j),
+					 &instance->irq_context[j]);
+			/* Retry irq register for IO_APIC*/
+			instance->msix_vectors = 0;
+			if (is_probe) {
+				pci_free_irq_vectors(instance->pdev);
+				return megasas_setup_irqs_ioapic(instance);
+			} else {
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * megasas_destroy_irqs -		unregister interrupts.
+ * @instance:				Adapter soft state
+ * return:				void
+ */
+static void
+megasas_destroy_irqs(struct megasas_instance *instance)
+{
+	int i;
+
+	if (instance->msix_vectors)
+		for (i = 0; i < instance->msix_vectors; i++) {
+			free_irq(pci_irq_vector(instance->pdev, i),
+				 &instance->irq_context[i]);
+		}
+	else
+		free_irq(pci_irq_vector(instance->pdev, 0),
+			 &instance->irq_context[0]);
+}
+
+/**
+ * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
+ * @instance:				Adapter soft state
+ */
+void
+megasas_setup_jbod_map(struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion = instance->ctrl_context;
+	u32 pd_seq_map_sz;
+
+	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
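+	/*
+	 * MR_PD_CFG_SEQ_NUM_SYNC embeds one MR_PD_CFG_SEQ entry inline,
+	 * so only (MAX_PHYSICAL_DEVICES - 1) extra entries are added to
+	 * size the map for all physical devices.
+	 */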
+
+	if (reset_devices || !fusion ||
+		!instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+		dev_info(&instance->pdev->dev,
+			"Jbod map is not supported %s %d\n",
+			__func__, __LINE__);
+		instance->use_seqnum_jbod_fp = false;
+		return;
+	}
+
+	if (fusion->pd_seq_sync[0])
+		goto skip_alloc;
+
+	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
+		fusion->pd_seq_sync[i] = dma_alloc_coherent
+			(&instance->pdev->dev, pd_seq_map_sz,
+			&fusion->pd_seq_phys[i], GFP_KERNEL);
+		if (!fusion->pd_seq_sync[i]) {
+			dev_err(&instance->pdev->dev,
+				"Failed to allocate memory from %s %d\n",
+				__func__, __LINE__);
+			if (i == 1) {
+				dma_free_coherent(&instance->pdev->dev,
+					pd_seq_map_sz, fusion->pd_seq_sync[0],
+					fusion->pd_seq_phys[0]);
+				fusion->pd_seq_sync[0] = NULL;
+			}
+			instance->use_seqnum_jbod_fp = false;
+			return;
+		}
+	}
+
+skip_alloc:
+	if (!megasas_sync_pd_seq_num(instance, false) &&
+		!megasas_sync_pd_seq_num(instance, true))
+		instance->use_seqnum_jbod_fp = true;
+	else
+		instance->use_seqnum_jbod_fp = false;
+}
+
+static void megasas_setup_reply_map(struct megasas_instance *instance)
+{
+	const struct cpumask *mask;
+	unsigned int queue, cpu;
+
+	for (queue = 0; queue < instance->msix_vectors; queue++) {
+		mask = pci_irq_get_affinity(instance->pdev, queue);
+		if (!mask)
+			goto fallback;
+
+		for_each_cpu(cpu, mask)
+			instance->reply_map[cpu] = queue;
+	}
+	return;
+
+fallback:
+	for_each_possible_cpu(cpu)
+		instance->reply_map[cpu] = cpu % instance->msix_vectors;
+}
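+/*
+ * Fallback example: with 4 MSI-X vectors and 8 possible CPUs, the
+ * round-robin above maps CPUs 0..7 to reply queues 0,1,2,3,0,1,2,3.
+ */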
+
+/**
+ * megasas_init_fw -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * This is the main function for initializing firmware
+ */
+
+static int megasas_init_fw(struct megasas_instance *instance)
+{
+	u32 max_sectors_1;
+	u32 max_sectors_2, tmp_sectors, msix_enable;
+	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
+	resource_size_t base_addr;
+	struct megasas_register_set __iomem *reg_set;
+	struct megasas_ctrl_info *ctrl_info = NULL;
+	unsigned long bar_list;
+	int i, j, loop, fw_msix_count = 0;
+	struct IOV_111 *iovPtr;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	/* Find first memory bar */
+	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
+	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
+					 "megasas: LSI")) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
+		return -EBUSY;
+	}
+
+	base_addr = pci_resource_start(instance->pdev, instance->bar);
+	instance->reg_set = ioremap_nocache(base_addr, 8192);
+
+	if (!instance->reg_set) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
+		goto fail_ioremap;
+	}
+
+	reg_set = instance->reg_set;
+
+	if (instance->adapter_type != MFI_SERIES)
+		instance->instancet = &megasas_instance_template_fusion;
+	else {
+		switch (instance->pdev->device) {
+		case PCI_DEVICE_ID_LSI_SAS1078R:
+		case PCI_DEVICE_ID_LSI_SAS1078DE:
+			instance->instancet = &megasas_instance_template_ppc;
+			break;
+		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+			instance->instancet = &megasas_instance_template_gen2;
+			break;
+		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+			instance->instancet = &megasas_instance_template_skinny;
+			break;
+		case PCI_DEVICE_ID_LSI_SAS1064R:
+		case PCI_DEVICE_ID_DELL_PERC5:
+		default:
+			instance->instancet = &megasas_instance_template_xscale;
+			instance->pd_list_not_supported = 1;
+			break;
+		}
+	}
+
+	if (megasas_transition_to_ready(instance, 0)) {
+		atomic_set(&instance->fw_reset_no_pci_access, 1);
+		instance->instancet->adp_reset
+			(instance, instance->reg_set);
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
+		dev_info(&instance->pdev->dev,
+			"FW restarted successfully from %s!\n",
+			__func__);
+
+		/* wait for about 30 seconds before retrying */
+		ssleep(30);
+
+		if (megasas_transition_to_ready(instance, 0))
+			goto fail_ready_state;
+	}
+
+	if (instance->adapter_type == VENTURA_SERIES) {
+		scratch_pad_3 =
+			readl(&instance->reg_set->outbound_scratch_pad_3);
+		instance->max_raid_mapsize = ((scratch_pad_3 >>
+			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
+			MR_MAX_RAID_MAP_SIZE_MASK);
+	}
+
+	/* Check if MSI-X is supported while in ready state */
+	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+		       0x4000000) >> 0x1a;
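+	/*
+	 * The 0x4000000 mask selects bit 26 of the status register and the
+	 * shift by 0x1a (26) reduces msix_enable to a 0/1 flag.
+	 */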
+	if (msix_enable && !msix_disable) {
+		int irq_flags = PCI_IRQ_MSIX;
+
+		scratch_pad_2 = readl
+			(&instance->reg_set->outbound_scratch_pad_2);
+		/* Check max MSI-X vectors */
+		if (fusion) {
+			if (instance->adapter_type == THUNDERBOLT_SERIES) {
+				/* Thunderbolt Series */
+				instance->msix_vectors = (scratch_pad_2
+					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+				fw_msix_count = instance->msix_vectors;
+			} else { /* Invader series supports more than 8 MSI-X vectors */
+				instance->msix_vectors = ((scratch_pad_2
+					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+				if (instance->msix_vectors > 16)
+					instance->msix_combined = true;
+
+				if (rdpq_enable)
+					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+								1 : 0;
+				fw_msix_count = instance->msix_vectors;
+				/* Save reply post host index addresses 1-15 in
+				 * local memory. Index 0 is already saved from
+				 * reg offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+				 */
+				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
+					instance->reply_post_host_index_addr[loop] =
+						(u32 __iomem *)
+						((u8 __iomem *)instance->reg_set +
+						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+						+ (loop * 0x10));
+				}
+			}
+			if (msix_vectors)
+				instance->msix_vectors = min(msix_vectors,
+					instance->msix_vectors);
+		} else /* MFI adapters */
+			instance->msix_vectors = 1;
+		/* Don't bother allocating more MSI-X vectors than cpus */
+		instance->msix_vectors = min(instance->msix_vectors,
+					     (unsigned int)num_online_cpus());
+		if (smp_affinity_enable)
+			irq_flags |= PCI_IRQ_AFFINITY;
+		i = pci_alloc_irq_vectors(instance->pdev, 1,
+					  instance->msix_vectors, irq_flags);
+		if (i > 0)
+			instance->msix_vectors = i;
+		else
+			instance->msix_vectors = 0;
+	}
+	/*
+	 * MSI-X host index 0 is common to all adapters.
+	 * It is used for all MPT based adapters.
+	 */
+	if (instance->msix_combined) {
+		instance->reply_post_host_index_addr[0] =
+				(u32 *)((u8 *)instance->reg_set +
+				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
+	} else {
+		instance->reply_post_host_index_addr[0] =
+			(u32 *)((u8 *)instance->reg_set +
+			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+	}
+
+	if (!instance->msix_vectors) {
+		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+		if (i < 0)
+			goto fail_init_adapter;
+	}
+
+	megasas_setup_reply_map(instance);
+
+	dev_info(&instance->pdev->dev,
+		"firmware supports msix\t: (%d)", fw_msix_count);
+	dev_info(&instance->pdev->dev,
+		"current msix/online cpus\t: (%d/%d)\n",
+		instance->msix_vectors, (unsigned int)num_online_cpus());
+	dev_info(&instance->pdev->dev,
+		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
+
+	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+		(unsigned long)instance);
+
+	instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+				GFP_KERNEL);
+	if (instance->ctrl_info == NULL)
+		goto fail_init_adapter;
+
+	/*
+	 * Below are the default values for legacy firmware
+	 * (non-fusion based controllers).
+	 */
+	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+	/* Get operational params, sge flags, send init cmd to controller */
+	if (instance->instancet->init_adapter(instance))
+		goto fail_init_adapter;
+
+	if (instance->adapter_type == VENTURA_SERIES) {
+		scratch_pad_4 =
+			readl(&instance->reg_set->outbound_scratch_pad_4);
+		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+			MR_DEFAULT_NVME_PAGE_SHIFT)
+			instance->nvme_page_size =
+				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+
+		dev_info(&instance->pdev->dev,
+			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
+	}
+
+	if (instance->msix_vectors ?
+		megasas_setup_irqs_msix(instance, 1) :
+		megasas_setup_irqs_ioapic(instance))
+		goto fail_init_adapter;
+
+	instance->instancet->enable_intr(instance);
+
+	dev_info(&instance->pdev->dev, "INIT adapter done\n");
+
+	megasas_setup_jbod_map(instance);
+
+	/* For passthrough:
+	 * the following call retrieves the PD list.
+	 */
+	memset(instance->pd_list, 0,
+		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+	if (megasas_get_pd_list(instance) < 0) {
+		dev_err(&instance->pdev->dev, "failed to get PD list\n");
+		goto fail_get_ld_pd_list;
+	}
+
+	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+	/* stream detection initialization */
+	if (instance->adapter_type == VENTURA_SERIES) {
+		fusion->stream_detect_by_ld =
+			kzalloc(sizeof(struct LD_STREAM_DETECT *)
+			* MAX_LOGICAL_DRIVES_EXT,
+			GFP_KERNEL);
+		if (!fusion->stream_detect_by_ld) {
+			dev_err(&instance->pdev->dev,
+				"unable to allocate stream detection for pool of LDs\n");
+			goto fail_get_ld_pd_list;
+		}
+		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
+			fusion->stream_detect_by_ld[i] =
+				kmalloc(sizeof(struct LD_STREAM_DETECT),
+				GFP_KERNEL);
+			if (!fusion->stream_detect_by_ld[i]) {
+				dev_err(&instance->pdev->dev,
+					"unable to allocate stream detect by LD\n ");
+				for (j = 0; j < i; ++j)
+					kfree(fusion->stream_detect_by_ld[j]);
+				kfree(fusion->stream_detect_by_ld);
+				fusion->stream_detect_by_ld = NULL;
+				goto fail_get_ld_pd_list;
+			}
+			fusion->stream_detect_by_ld[i]->mru_bit_map
+				= MR_STREAM_BITMAP;
+		}
+	}
+
+	if (megasas_ld_list_query(instance,
+				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+		goto fail_get_ld_pd_list;
+
+	/*
+	 * Compute the max allowed sectors per IO: The controller info has two
+	 * limits on max sectors. Driver should use the minimum of these two.
+	 *
+	 * 1 << stripe_sz_ops.min = max sectors per strip
+	 *
+	 * Note that older firmware (< FW ver 30) didn't report the information
+	 * needed to calculate max_sectors_1, so the number always ended up as zero.
+	 */
+	tmp_sectors = 0;
+	ctrl_info = instance->ctrl_info;
+
+	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+		le16_to_cpu(ctrl_info->max_strips_per_io);
+	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+
+	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
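+	/*
+	 * Worked example with hypothetical controller values:
+	 * stripe_sz_ops.min = 7 and max_strips_per_io = 8 give
+	 * max_sectors_1 = (1 << 7) * 8 = 1024; with max_request_size = 512,
+	 * tmp_sectors = min(1024, 512) = 512 sectors per IO.
+	 */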
+
+	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
+	instance->passive = ctrl_info->cluster.passive;
+	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
+	instance->UnevenSpanSupport =
+		ctrl_info->adapterOperations2.supportUnevenSpans;
+	if (instance->UnevenSpanSupport) {
+		struct fusion_context *fusion = instance->ctrl_context;
+		if (MR_ValidateMapInfo(instance))
+			fusion->fast_path_io = 1;
+		else
+			fusion->fast_path_io = 0;
+
+	}
+	if (ctrl_info->host_interface.SRIOV) {
+		instance->requestorId = ctrl_info->iov.requestorId;
+		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
+			if (!ctrl_info->adapterOperations2.activePassive)
+			    instance->PlasmaFW111 = 1;
+
+			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
+			    instance->PlasmaFW111 ? "1.11" : "new");
+
+			if (instance->PlasmaFW111) {
+			    iovPtr = (struct IOV_111 *)
+				((unsigned char *)ctrl_info + IOV_111_OFFSET);
+			    instance->requestorId = iovPtr->requestorId;
+			}
+		}
+		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
+			instance->requestorId);
+	}
+
+	instance->crash_dump_fw_support =
+		ctrl_info->adapterOperations3.supportCrashDump;
+	instance->crash_dump_drv_support =
+		(instance->crash_dump_fw_support &&
+		instance->crash_dump_buf);
+	if (instance->crash_dump_drv_support)
+		megasas_set_crash_dump_params(instance,
+			MR_CRASH_BUF_TURN_OFF);
+
+	else {
+		if (instance->crash_dump_buf)
+			pci_free_consistent(instance->pdev,
+				CRASH_DMA_BUF_SIZE,
+				instance->crash_dump_buf,
+				instance->crash_dump_h);
+		instance->crash_dump_buf = NULL;
+	}
+
+
+	dev_info(&instance->pdev->dev,
+		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
+		le16_to_cpu(ctrl_info->pci.vendor_id),
+		le16_to_cpu(ctrl_info->pci.device_id),
+		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
+		le16_to_cpu(ctrl_info->pci.sub_device_id));
+	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
+		instance->UnevenSpanSupport ? "yes" : "no");
+	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
+		instance->crash_dump_drv_support ? "yes" : "no");
+	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
+		instance->use_seqnum_jbod_fp ? "yes" : "no");
+
+
+	instance->max_sectors_per_req = instance->max_num_sge *
+						SGE_BUFFER_SIZE / 512;
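+	/*
+	 * E.g. max_num_sge = 16 with SGE_BUFFER_SIZE = 4096 (illustrative
+	 * values) allows 16 * 4096 / 512 = 128 sectors (64 KB) per request.
+	 */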
+	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+		instance->max_sectors_per_req = tmp_sectors;
+
+	/* Check for valid throttlequeuedepth module parameter */
+	if (throttlequeuedepth &&
+			throttlequeuedepth <= instance->max_scsi_cmds)
+		instance->throttlequeuedepth = throttlequeuedepth;
+	else
+		instance->throttlequeuedepth =
+				MEGASAS_THROTTLE_QUEUE_DEPTH;
+
+	if ((resetwaittime < 1) ||
+	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
+		resetwaittime = MEGASAS_RESET_WAIT_TIME;
+
+	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+
+	/* Launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 1))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else
+			instance->skip_heartbeat_timer_del = 1;
+	}
+
+	return 0;
+
+fail_get_ld_pd_list:
+	instance->instancet->disable_intr(instance);
+	megasas_destroy_irqs(instance);
+fail_init_adapter:
+	if (instance->msix_vectors)
+		pci_free_irq_vectors(instance->pdev);
+	instance->msix_vectors = 0;
+fail_ready_state:
+	kfree(instance->ctrl_info);
+	instance->ctrl_info = NULL;
+	iounmap(instance->reg_set);
+
+fail_ioremap:
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+
+	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+		__func__, __LINE__);
+	return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi -	Reverses the FW initialization
+ * @instance:			Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
+
+	if (instance->reply_queue)
+		pci_free_consistent(instance->pdev, reply_q_sz,
+			    instance->reply_queue, instance->reply_queue_h);
+
+	megasas_free_cmds(instance);
+
+	iounmap(instance->reg_set);
+
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+}
+
+/**
+ * megasas_get_seq_num -	Gets latest event sequence numbers
+ * @instance:			Adapter soft state
+ * @eli:			FW event log sequence numbers information
+ *
+ * FW maintains a log of all events in a non-volatile area. Upper layers would
+ * usually find out the latest sequence number of the events, the seq number at
+ * the boot etc. They would "read" all the events below the latest seq number
+ * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
+ * number), they would subscribe to AEN (asynchronous event notification) and
+ * wait for the events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+		    struct megasas_evt_log_info *eli)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct megasas_evt_log_info *el_info;
+	dma_addr_t el_info_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -ENOMEM;
+
+	dcmd = &cmd->frame->dcmd;
+	el_info = pci_alloc_consistent(instance->pdev,
+				       sizeof(struct megasas_evt_log_info),
+				       &el_info_h);
+
+	if (!el_info) {
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(el_info, 0, sizeof(*el_info));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
+		DCMD_SUCCESS) {
+		/*
+		 * Copy the data back into the caller's buffer
+		 */
+		eli->newest_seq_num = el_info->newest_seq_num;
+		eli->oldest_seq_num = el_info->oldest_seq_num;
+		eli->clear_seq_num = el_info->clear_seq_num;
+		eli->shutdown_seq_num = el_info->shutdown_seq_num;
+		eli->boot_seq_num = el_info->boot_seq_num;
+	} else
+		dev_err(&instance->pdev->dev, "DCMD failed "
+			"from %s\n", __func__);
+
+	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+			    el_info, el_info_h);
+
+	megasas_return_cmd(instance, cmd);
+
+	return 0;
+}
+
+/**
+ * megasas_register_aen -	Registers for asynchronous event notification
+ * @instance:			Adapter soft state
+ * @seq_num:			The starting sequence number
+ * @class_locale:		Class of the event
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It requests
+ * to be notified if and only if the event is of type @class_locale
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+		     u32 class_locale_word)
+{
+	int ret_val;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	union megasas_evt_class_locale curr_aen;
+	union megasas_evt_class_locale prev_aen;
+
+	/*
+	 * If there is an AEN pending already (aen_cmd), check if the
+	 * class_locale of that pending AEN is inclusive of the new
+	 * AEN request we currently have. If it is, then we don't have
+	 * to do anything. In other words, whichever events the current
+	 * AEN request is subscribing to, have already been subscribed
+	 * to.
+	 *
+	 * If the old_cmd is _not_ inclusive, then we have to abort
+	 * that command, form a class_locale that is a superset of both
+	 * the old and the current one, and re-issue it to the FW
+	 */
+
+	curr_aen.word = class_locale_word;
+
+	if (instance->aen_cmd) {
+
+		prev_aen.word =
+			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+
+		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+			dev_info(&instance->pdev->dev,
+				 "%s %d out of range class %d send by application\n",
+				 __func__, __LINE__, curr_aen.members.class);
+			return 0;
+		}
+
+		/*
+		 * A class whose enum value is smaller is inclusive of all
+		 * higher values. If a PROGRESS (= -1) was previously
+		 * registered, then new registration requests for higher
+		 * classes need not be sent to FW. They are automatically
+		 * included.
+		 *
+		 * Locale numbers don't have such hierarchy. They are bitmap
+		 * values
+		 */
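+		/*
+		 * Locale example: a previous registration with locale 0x0001
+		 * and a new request for locale 0x0003 gives
+		 * (0x0001 & 0x0003) ^ 0x0003 = 0x0002 != 0, so the old AEN
+		 * cmd is aborted and re-issued with the merged locale 0x0003.
+		 */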
+		if ((prev_aen.members.class <= curr_aen.members.class) &&
+		    !((prev_aen.members.locale & curr_aen.members.locale) ^
+		      curr_aen.members.locale)) {
+			/*
+			 * Previously issued event registration includes
+			 * current request. Nothing to do.
+			 */
+			return 0;
+		} else {
+			curr_aen.members.locale |= prev_aen.members.locale;
+
+			if (prev_aen.members.class < curr_aen.members.class)
+				curr_aen.members.class = prev_aen.members.class;
+
+			instance->aen_cmd->abort_aen = 1;
+			ret_val = megasas_issue_blocked_abort_cmd(instance,
+								  instance->
+								  aen_cmd, 30);
+
+			if (ret_val) {
+				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
+				       "previous AEN command\n");
+				return ret_val;
+			}
+		}
+	}
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -ENOMEM;
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+	/*
+	 * Prepare DCMD for aen registration
+	 */
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
+	instance->last_seq_num = seq_num;
+	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+	if (instance->aen_cmd != NULL) {
+		megasas_return_cmd(instance, cmd);
+		return 0;
+	}
+
+	/*
+	 * Store reference to the cmd used to register for AEN. When an
+	 * application wants us to register for AEN, we have to abort this
+	 * cmd and re-register with a new EVENT LOCALE supplied by that app
+	 */
+	instance->aen_cmd = cmd;
+
+	/*
+	 * Issue the aen registration frame
+	 */
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	return 0;
+}
+
+/* megasas_get_target_prop - Send DCMD with the details below to firmware.
+ *
+ * This DCMD fetches a few properties of an LD/system PD defined
+ * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
+ *
+ * The DCMD is sent by the driver whenever a new target is added to the OS.
+ *
+ * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
+ * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
+ *                       0 = system PD, 1 = LD.
+ * dcmd.mbox.s[1]      - TargetID for LD/system PD.
+ * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
+ *
+ * @instance:		Adapter soft state
+ * @sdev:		OS provided scsi device
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int
+megasas_get_target_prop(struct megasas_instance *instance,
+			struct scsi_device *sdev)
+{
+	int ret;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
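+	/*
+	 * E.g. channel 1, id 5 maps to targetId = (1 % 2) *
+	 * MEGASAS_MAX_DEV_PER_CHANNEL + 5, i.e. 133 assuming the usual
+	 * MEGASAS_MAX_DEV_PER_CHANNEL of 128.
+	 */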
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_err(&instance->pdev->dev,
+			"Failed to get cmd %s\n", __func__);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
+
+	dcmd->mbox.s[1] = cpu_to_le16(targetId);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len =
+		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
+	dcmd->sgl.sge32[0].phys_addr =
+		cpu_to_le32(instance->tgt_prop_h);
+	dcmd->sgl.sge32[0].length =
+		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+
+	if ((instance->adapter_type != MFI_SERIES) &&
+	    !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance,
+						cmd, MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	switch (ret) {
+	case DCMD_TIMEOUT:
+		switch (dcmd_timeout_ocr_possible(instance)) {
+		case INITIATE_OCR:
+			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+			megasas_reset_fusion(instance->host,
+					     MFI_IO_TIMEOUT_OCR);
+			break;
+		case KILL_ADAPTER:
+			megaraid_sas_kill_hba(instance);
+			break;
+		case IGNORE_TIMEOUT:
+			dev_info(&instance->pdev->dev,
+				 "Ignore DCMD timeout: %s %d\n",
+				 __func__, __LINE__);
+			break;
+		}
+		break;
+
+	default:
+		megasas_return_cmd(instance, cmd);
+	}
+	if (ret != DCMD_SUCCESS)
+		dev_err(&instance->pdev->dev,
+			"return from %s %d return value %d\n",
+			__func__, __LINE__, ret);
+
+	return ret;
+}
+
+/**
+ * megasas_start_aen -	Subscribes to AEN during driver load time
+ * @instance:		Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+	struct megasas_evt_log_info eli;
+	union megasas_evt_class_locale class_locale;
+
+	/*
+	 * Get the latest sequence number from FW
+	 */
+	memset(&eli, 0, sizeof(eli));
+
+	if (megasas_get_seq_num(instance, &eli))
+		return -1;
+
+	/*
+	 * Register AEN with FW for latest sequence number plus 1
+	 */
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	return megasas_register_aen(instance,
+			le32_to_cpu(eli.newest_seq_num) + 1,
+			class_locale.word);
+}
+
+/**
+ * megasas_io_attach -	Attaches this driver to SCSI mid-layer
+ * @instance:		Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+	struct Scsi_Host *host = instance->host;
+
+	/*
+	 * Export parameters required by SCSI mid-layer
+	 */
+	host->unique_id = instance->unique_id;
+	host->can_queue = instance->max_scsi_cmds;
+	host->this_id = instance->init_id;
+	host->sg_tablesize = instance->max_num_sge;
+
+	if (instance->fw_support_ieee)
+		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
+	/*
+	 * Check if the module parameter value for max_sectors can be used
+	 */
+	if (max_sectors && max_sectors < instance->max_sectors_per_req)
+		instance->max_sectors_per_req = max_sectors;
+	else {
+		if (max_sectors) {
+			if (((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+				(max_sectors <= MEGASAS_MAX_SECTORS)) {
+				instance->max_sectors_per_req = max_sectors;
+			} else {
+			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
+				"and <= %d (or < 1MB for GEN2 controller)\n",
+				instance->max_sectors_per_req);
+			}
+		}
+	}
+
+	host->max_sectors = instance->max_sectors_per_req;
+	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+	host->max_lun = MEGASAS_MAX_LUN;
+	host->max_cmd_len = 16;
+
+	/*
+	 * Notify the mid-layer about the new controller
+	 */
+	if (scsi_add_host(host, &instance->pdev->dev)) {
+		dev_err(&instance->pdev->dev,
+			"Failed to add host from %s %d\n",
+			__func__, __LINE__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+	/*
+	 * All our controllers are capable of performing 64-bit DMA
+	 */
+	if (IS_DMA64) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+
+			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+				goto fail_set_dma_mask;
+		}
+	} else {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+			goto fail_set_dma_mask;
+	}
+	/*
+	 * Ensure that all data structures are allocated in 32-bit
+	 * memory.
+	 */
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+		/* Try 32bit DMA mask and 32 bit Consistent dma mask */
+		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+			&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			dev_info(&pdev->dev, "set 32bit DMA mask"
+				"and 32 bit consistent mask\n");
+		else
+			goto fail_set_dma_mask;
+	}
+
+	return 0;
+
+fail_set_dma_mask:
+	return 1;
+}
+
+/*
+ * megasas_set_adapter_type -	Set adapter type.
+ *				Supported controllers can be divided into
+ *				4 categories-  enum MR_ADAPTER_TYPE {
+ *							MFI_SERIES = 1,
+ *							THUNDERBOLT_SERIES = 2,
+ *							INVADER_SERIES = 3,
+ *							VENTURA_SERIES = 4,
+ *						};
+ * @instance:			Adapter soft state
+ * return:			void
+ */
+static inline void megasas_set_adapter_type(struct megasas_instance *instance)
+{
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_VENTURA:
+	case PCI_DEVICE_ID_LSI_HARPOON:
+	case PCI_DEVICE_ID_LSI_TOMCAT:
+	case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+	case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+		instance->adapter_type = VENTURA_SERIES;
+		break;
+	case PCI_DEVICE_ID_LSI_FUSION:
+	case PCI_DEVICE_ID_LSI_PLASMA:
+		instance->adapter_type = THUNDERBOLT_SERIES;
+		break;
+	case PCI_DEVICE_ID_LSI_INVADER:
+	case PCI_DEVICE_ID_LSI_INTRUDER:
+	case PCI_DEVICE_ID_LSI_INTRUDER_24:
+	case PCI_DEVICE_ID_LSI_CUTLASS_52:
+	case PCI_DEVICE_ID_LSI_CUTLASS_53:
+	case PCI_DEVICE_ID_LSI_FURY:
+		instance->adapter_type = INVADER_SERIES;
+		break;
+	default: /* For all other supported controllers */
+		instance->adapter_type = MFI_SERIES;
+		break;
+	}
+}
+
+static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
+{
+	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+						  &instance->producer_h);
+	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+						  &instance->consumer_h);
+
+	if (!instance->producer || !instance->consumer) {
+		dev_err(&instance->pdev->dev,
+			"Failed to allocate memory for producer, consumer\n");
+		return -1;
+	}
+
+	*instance->producer = 0;
+	*instance->consumer = 0;
+	return 0;
+}
+
+/**
+ * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
+ *				structures which are not common across MFI
+ *				adapters and fusion adapters.
+ *				For MFI based adapters, allocate producer and
+ *				consumer buffers. For fusion adapters, allocate
+ *				memory for fusion context.
+ * @instance:			Adapter soft state
+ * return:			0 for SUCCESS
+ */
+static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
+{
+	instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
+				      GFP_KERNEL);
+	if (!instance->reply_map)
+		return -ENOMEM;
+
+	switch (instance->adapter_type) {
+	case MFI_SERIES:
+		if (megasas_alloc_mfi_ctrl_mem(instance))
+			goto fail;
+		break;
+	case VENTURA_SERIES:
+	case THUNDERBOLT_SERIES:
+	case INVADER_SERIES:
+		if (megasas_alloc_fusion_context(instance))
+			goto fail;
+		break;
+	}
+
+	return 0;
+ fail:
+	kfree(instance->reply_map);
+	instance->reply_map = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
+ *				producer, consumer buffers for MFI adapters
+ *
+ * @instance:			Adapter soft state
+ *
+ */
+static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
+{
+	kfree(instance->reply_map);
+	if (instance->adapter_type == MFI_SERIES) {
+		if (instance->producer)
+			pci_free_consistent(instance->pdev, sizeof(u32),
+					    instance->producer,
+					    instance->producer_h);
+		if (instance->consumer)
+			pci_free_consistent(instance->pdev, sizeof(u32),
+					    instance->consumer,
+					    instance->consumer_h);
+	} else {
+		megasas_free_fusion_context(instance);
+	}
+}
+
+/**
+ * megasas_probe_one -	PCI hotplug entry point
+ * @pdev:		PCI device structure
+ * @id:			PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+			     const struct pci_device_id *id)
+{
+	int rval, pos;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+	u16 control = 0;
+
+	/* Reset MSI-X in the kdump kernel */
+	if (reset_devices) {
+		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+		if (pos) {
+			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+					     &control);
+			if (control & PCI_MSIX_FLAGS_ENABLE) {
+				dev_info(&pdev->dev, "resetting MSI-X\n");
+				pci_write_config_word(pdev,
+						      pos + PCI_MSIX_FLAGS,
+						      control &
+						      ~PCI_MSIX_FLAGS_ENABLE);
+			}
+		}
+	}
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and DMA mask
+	 */
+	rval = pci_enable_device_mem(pdev);
+
+	if (rval)
+		return rval;
+
+	pci_set_master(pdev);
+
+	if (megasas_set_dma_mask(pdev))
+		goto fail_set_dma_mask;
+
+	host = scsi_host_alloc(&megasas_template,
+			       sizeof(struct megasas_instance));
+
+	if (!host) {
+		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
+		goto fail_alloc_instance;
+	}
+
+	instance = (struct megasas_instance *)host->hostdata;
+	memset(instance, 0, sizeof(*instance));
+	atomic_set(&instance->fw_reset_no_pci_access, 0);
+	instance->pdev = pdev;
+
+	megasas_set_adapter_type(instance);
+
+	if (megasas_alloc_ctrl_mem(instance))
+		goto fail_alloc_dma_buf;
+
+	/* Crash dump feature related initialisation*/
+	instance->drv_buf_index = 0;
+	instance->drv_buf_alloc = 0;
+	instance->crash_dump_fw_support = 0;
+	instance->crash_dump_app_support = 0;
+	instance->fw_crash_state = UNAVAILABLE;
+	spin_lock_init(&instance->crashdump_lock);
+	instance->crash_dump_buf = NULL;
+
+	megasas_poll_wait_aen = 0;
+	instance->flag_ieee = 0;
+	instance->ev = NULL;
+	instance->issuepend_done = 1;
+	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+	instance->is_imr = 0;
+
+	instance->evt_detail = pci_alloc_consistent(pdev,
+						    sizeof(struct
+							   megasas_evt_detail),
+						    &instance->evt_detail_h);
+
+	if (!instance->evt_detail) {
+		dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
+		       "event detail structure\n");
+		goto fail_alloc_dma_buf;
+	}
+
+	if (!reset_devices) {
+		instance->system_info_buf = pci_zalloc_consistent(pdev,
+					sizeof(struct MR_DRV_SYSTEM_INFO),
+					&instance->system_info_h);
+		if (!instance->system_info_buf)
+			dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+
+		instance->pd_info = pci_alloc_consistent(pdev,
+			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+
+		if (!instance->pd_info)
+			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
+		instance->tgt_prop = pci_alloc_consistent(pdev,
+			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+
+		if (!instance->tgt_prop)
+			dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+
+		instance->crash_dump_buf = pci_alloc_consistent(pdev,
+						CRASH_DMA_BUF_SIZE,
+						&instance->crash_dump_h);
+		if (!instance->crash_dump_buf)
+			dev_err(&pdev->dev, "Can't allocate Firmware "
+				"crash dump DMA buffer\n");
+	}
+
+	/*
+	 * Initialize locks and queues
+	 */
+	INIT_LIST_HEAD(&instance->cmd_pool);
+	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+	init_waitqueue_head(&instance->int_cmd_wait_q);
+	init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+	spin_lock_init(&instance->mfi_pool_lock);
+	spin_lock_init(&instance->hba_lock);
+	spin_lock_init(&instance->stream_lock);
+	spin_lock_init(&instance->completion_lock);
+
+	mutex_init(&instance->reset_mutex);
+	mutex_init(&instance->hba_mutex);
+
+	/*
+	 * Initialize PCI related and misc parameters
+	 */
+	instance->host = host;
+	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
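+	/*
+	 * E.g. bus 0x02, devfn 0x10 yields unique_id 0x0210.
+	 */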
+	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+	instance->ctrl_info = NULL;
+
+
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+		instance->flag_ieee = 1;
+
+	megasas_dbg_lvl = 0;
+	instance->flag = 0;
+	instance->unload = 1;
+	instance->last_time = 0;
+	instance->disableOnlineCtrlReset = 1;
+	instance->UnevenSpanSupport = 0;
+
+	if (instance->adapter_type != MFI_SERIES) {
+		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
+		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+	} else
+		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+
+	/*
+	 * Initialize MFI Firmware
+	 */
+	if (megasas_init_fw(instance))
+		goto fail_init_mfi;
+
+	if (instance->requestorId) {
+		if (instance->PlasmaFW111) {
+			instance->vf_affiliation_111 =
+				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+						     &instance->vf_affiliation_111_h);
+			if (!instance->vf_affiliation_111)
+				dev_warn(&pdev->dev, "Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		} else {
+			instance->vf_affiliation =
+				pci_alloc_consistent(pdev,
+						     (MAX_LOGICAL_DRIVES + 1) *
+						     sizeof(struct MR_LD_VF_AFFILIATION),
+						     &instance->vf_affiliation_h);
+			if (!instance->vf_affiliation)
+				dev_warn(&pdev->dev, "Can't allocate "
+				       "memory for VF affiliation buffer\n");
+		}
+	}
+
+	/*
+	 * Store instance in PCI softstate
+	 */
+	pci_set_drvdata(pdev, instance);
+
+	/*
+	 * Add this controller to megasas_mgmt_info structure so that it
+	 * can be exported to management applications
+	 */
+	megasas_mgmt_info.count++;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+	megasas_mgmt_info.max_index++;
+
+	/*
+	 * Register with SCSI mid-layer
+	 */
+	if (megasas_io_attach(instance))
+		goto fail_io_attach;
+
+	instance->unload = 0;
+	/*
+	 * Trigger SCSI to scan our drives
+	 */
+	scsi_scan_host(host);
+
+	/*
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
+	if (megasas_start_aen(instance)) {
+		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
+		goto fail_start_aen;
+	}
+
+	/* Get current SR-IOV LD/VF affiliation */
+	if (instance->requestorId)
+		megasas_get_ld_vf_affiliation(instance, 1);
+
+	return 0;
+
+fail_start_aen:
+fail_io_attach:
+	megasas_mgmt_info.count--;
+	megasas_mgmt_info.max_index--;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+
+	instance->instancet->disable_intr(instance);
+	megasas_destroy_irqs(instance);
+
+	if (instance->adapter_type != MFI_SERIES)
+		megasas_release_fusion(instance);
+	else
+		megasas_release_mfi(instance);
+	if (instance->msix_vectors)
+		pci_free_irq_vectors(instance->pdev);
+fail_init_mfi:
+fail_alloc_dma_buf:
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				    instance->evt_detail,
+				    instance->evt_detail_h);
+
+	if (instance->pd_info)
+		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+					instance->pd_info,
+					instance->pd_info_h);
+	if (instance->tgt_prop)
+		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+					instance->tgt_prop,
+					instance->tgt_prop_h);
+	megasas_free_ctrl_mem(instance);
+	scsi_host_put(host);
+fail_alloc_instance:
+fail_set_dma_mask:
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache -	Requests FW to flush all its caches
+ * @instance:			Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return;
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 0;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = 0;
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
+	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+			!= DCMD_SUCCESS) {
+		dev_err(&instance->pdev->dev,
+			"return from %s %d\n", __func__, __LINE__);
+		return;
+	}
+
+	megasas_return_cmd(instance, cmd);
+}
+
+/**
+ * megasas_shutdown_controller -	Instructs FW to shutdown the controller
+ * @instance:				Adapter soft state
+ * @opcode:				Shutdown/Hibernate
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+					u32 opcode)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return;
+
+	if (instance->aen_cmd)
+		megasas_issue_blocked_abort_cmd(instance,
+			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
+	if (instance->map_update_cmd)
+		megasas_issue_blocked_abort_cmd(instance,
+			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
+	if (instance->jbod_seq_cmd)
+		megasas_issue_blocked_abort_cmd(instance,
+			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 0;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = 0;
+	dcmd->opcode = cpu_to_le32(opcode);
+
+	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+			!= DCMD_SUCCESS) {
+		dev_err(&instance->pdev->dev,
+			"return from %s %d\n", __func__, __LINE__);
+		return;
+	}
+
+	megasas_return_cmd(instance, cmd);
+}
+
+#ifdef CONFIG_PM
+/**
+ * megasas_suspend -	driver suspend entry point
+ * @pdev:		PCI device structure
+ * @state:		PCI power state to suspend routine
+ */
+static int
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+
+	instance = pci_get_drvdata(pdev);
+	host = instance->host;
+	instance->unload = 1;
+
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+	/* cancel the delayed work if it is still queued */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work_sync(&ev->hotplug_work);
+		instance->ev = NULL;
+	}
+
+	tasklet_kill(&instance->isr_tasklet);
+
+	pci_set_drvdata(instance->pdev, instance);
+	instance->instancet->disable_intr(instance);
+
+	megasas_destroy_irqs(instance);
+
+	if (instance->msix_vectors)
+		pci_free_irq_vectors(instance->pdev);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+/**
+ * megasas_resume-      driver resume entry point
+ * @pdev:               PCI device structure
+ */
+static int
+megasas_resume(struct pci_dev *pdev)
+{
+	int rval;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+	int irq_flags = PCI_IRQ_LEGACY;
+
+	instance = pci_get_drvdata(pdev);
+	host = instance->host;
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and DMA mask
+	 */
+	rval = pci_enable_device_mem(pdev);
+
+	if (rval) {
+		dev_err(&pdev->dev, "Enable device failed\n");
+		return rval;
+	}
+
+	pci_set_master(pdev);
+
+	if (megasas_set_dma_mask(pdev))
+		goto fail_set_dma_mask;
+
+	/*
+	 * Initialize MFI Firmware
+	 */
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+	/*
+	 * We expect the FW state to be READY
+	 */
+	if (megasas_transition_to_ready(instance, 0))
+		goto fail_ready_state;
+
+	/* Now re-enable MSI-X */
+	if (instance->msix_vectors) {
+		irq_flags = PCI_IRQ_MSIX;
+		if (smp_affinity_enable)
+			irq_flags |= PCI_IRQ_AFFINITY;
+	}
+	rval = pci_alloc_irq_vectors(instance->pdev, 1,
+				     instance->msix_vectors ?
+				     instance->msix_vectors : 1, irq_flags);
+	if (rval < 0)
+		goto fail_reenable_msix;
+
+	megasas_setup_reply_map(instance);
+
+	if (instance->adapter_type != MFI_SERIES) {
+		megasas_reset_reply_desc(instance);
+		if (megasas_ioc_init_fusion(instance)) {
+			megasas_free_cmds(instance);
+			megasas_free_cmds_fusion(instance);
+			goto fail_init_mfi;
+		}
+		if (!megasas_get_map_info(instance))
+			megasas_sync_map_info(instance);
+	} else {
+		*instance->producer = 0;
+		*instance->consumer = 0;
+		if (megasas_issue_init_mfi(instance))
+			goto fail_init_mfi;
+	}
+
+	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+		goto fail_init_mfi;
+
+	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+		     (unsigned long)instance);
+
+	if (instance->msix_vectors ?
+			megasas_setup_irqs_msix(instance, 0) :
+			megasas_setup_irqs_ioapic(instance))
+		goto fail_init_mfi;
+
+	/* Re-launch SR-IOV heartbeat timer */
+	if (instance->requestorId) {
+		if (!megasas_sriov_start_heartbeat(instance, 0))
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		else {
+			instance->skip_heartbeat_timer_del = 1;
+			goto fail_init_mfi;
+		}
+	}
+
+	instance->instancet->enable_intr(instance);
+	megasas_setup_jbod_map(instance);
+	instance->unload = 0;
+
+	/*
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
+	if (megasas_start_aen(instance))
+		dev_err(&instance->pdev->dev, "Start AEN failed\n");
+
+	return 0;
+
+fail_init_mfi:
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				instance->evt_detail,
+				instance->evt_detail_h);
+
+	if (instance->pd_info)
+		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+					instance->pd_info,
+					instance->pd_info_h);
+	if (instance->tgt_prop)
+		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+					instance->tgt_prop,
+					instance->tgt_prop_h);
+
+	megasas_free_ctrl_mem(instance);
+	scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+fail_reenable_msix:
+
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+#else
+#define megasas_suspend	NULL
+#define megasas_resume	NULL
+#endif
+
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+	int i;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return 1;
+
+	for (i = 0; i < wait_time; i++) {
+		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
+			break;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+		msleep(1000);
+	}
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+			__func__);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * megasas_detach_one -	PCI hot"un"plug entry point
+ * @pdev:		PCI device structure
+ */
+static void megasas_detach_one(struct pci_dev *pdev)
+{
+	int i;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+	struct fusion_context *fusion;
+	u32 pd_seq_map_sz;
+
+	instance = pci_get_drvdata(pdev);
+	host = instance->host;
+	fusion = instance->ctrl_context;
+
+	/* Shutdown SR-IOV heartbeat timer */
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+
+	if (instance->fw_crash_state != UNAVAILABLE)
+		megasas_free_host_crash_buffer(instance);
+	scsi_remove_host(instance->host);
+	instance->unload = 1;
+
+	if (megasas_wait_for_adapter_operational(instance))
+		goto skip_firing_dcmds;
+
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
+	/* cancel the delayed work if it is still queued */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work_sync(&ev->hotplug_work);
+		instance->ev = NULL;
+	}
+
+	/* cancel all wait events */
+	wake_up_all(&instance->int_cmd_wait_q);
+
+	tasklet_kill(&instance->isr_tasklet);
+
+	/*
+	 * Take the instance off the instance array. Note that we will not
+	 * decrement the max_index. We let this array be a sparse array.
+	 */
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+		if (megasas_mgmt_info.instance[i] == instance) {
+			megasas_mgmt_info.count--;
+			megasas_mgmt_info.instance[i] = NULL;
+
+			break;
+		}
+	}
+
+	instance->instancet->disable_intr(instance);
+
+	megasas_destroy_irqs(instance);
+
+	if (instance->msix_vectors)
+		pci_free_irq_vectors(instance->pdev);
+
+	if (instance->adapter_type == VENTURA_SERIES) {
+		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
+			kfree(fusion->stream_detect_by_ld[i]);
+		kfree(fusion->stream_detect_by_ld);
+		fusion->stream_detect_by_ld = NULL;
+	}
+
+
+	if (instance->adapter_type != MFI_SERIES) {
+		megasas_release_fusion(instance);
+		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+			(sizeof(struct MR_PD_CFG_SEQ) *
+				(MAX_PHYSICAL_DEVICES - 1));
+		for (i = 0; i < 2 ; i++) {
+			if (fusion->ld_map[i])
+				dma_free_coherent(&instance->pdev->dev,
+						  fusion->max_map_sz,
+						  fusion->ld_map[i],
+						  fusion->ld_map_phys[i]);
+			if (fusion->ld_drv_map[i]) {
+				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+					vfree(fusion->ld_drv_map[i]);
+				else
+					free_pages((ulong)fusion->ld_drv_map[i],
+						   fusion->drv_map_pages);
+			}
+
+			if (fusion->pd_seq_sync[i])
+				dma_free_coherent(&instance->pdev->dev,
+					pd_seq_map_sz,
+					fusion->pd_seq_sync[i],
+					fusion->pd_seq_phys[i]);
+		}
+	} else {
+		megasas_release_mfi(instance);
+	}
+
+	kfree(instance->ctrl_info);
+
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				instance->evt_detail, instance->evt_detail_h);
+	if (instance->pd_info)
+		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+					instance->pd_info,
+					instance->pd_info_h);
+	if (instance->tgt_prop)
+		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+					instance->tgt_prop,
+					instance->tgt_prop_h);
+	if (instance->vf_affiliation)
+		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    instance->vf_affiliation,
+				    instance->vf_affiliation_h);
+
+	if (instance->vf_affiliation_111)
+		pci_free_consistent(pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    instance->vf_affiliation_111,
+				    instance->vf_affiliation_111_h);
+
+	if (instance->hb_host_mem)
+		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+				    instance->hb_host_mem,
+				    instance->hb_host_mem_h);
+
+	if (instance->crash_dump_buf)
+		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+			    instance->crash_dump_buf, instance->crash_dump_h);
+
+	if (instance->system_info_buf)
+		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+				    instance->system_info_buf, instance->system_info_h);
+
+	megasas_free_ctrl_mem(instance);
+
+	scsi_host_put(host);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * megasas_shutdown -	Shutdown entry point
+ * @pdev:		PCI device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+	struct megasas_instance *instance = pci_get_drvdata(pdev);
+
+	instance->unload = 1;
+
+	if (megasas_wait_for_adapter_operational(instance))
+		goto skip_firing_dcmds;
+
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
+	instance->instancet->disable_intr(instance);
+	megasas_destroy_irqs(instance);
+
+	if (instance->msix_vectors)
+		pci_free_irq_vectors(instance->pdev);
+}
+
+/**
+ * megasas_mgmt_open -	char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+	/*
+	 * Allow only those users with admin rights
+	 */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	return 0;
+}
+
+/**
+ * megasas_mgmt_fasync -	Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+	int rc;
+
+	mutex_lock(&megasas_async_queue_mutex);
+
+	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+	mutex_unlock(&megasas_async_queue_mutex);
+
+	if (rc >= 0) {
+		/* For sanity checking when we get an ioctl */
+		filep->private_data = filep;
+		return 0;
+	}
+
+	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+	return rc;
+}
+
+/**
+ * megasas_mgmt_poll -  char node "poll" entry point
+ */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask;
+	unsigned long flags;
+
+	poll_wait(file, &megasas_poll_wait, wait);
+	spin_lock_irqsave(&poll_aen_lock, flags);
+	if (megasas_poll_wait_aen)
+		mask = (POLLIN | POLLRDNORM);
+	else
+		mask = 0;
+	megasas_poll_wait_aen = 0;
+	spin_unlock_irqrestore(&poll_aen_lock, flags);
+	return mask;
+}
+
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ *		Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd:	MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
+{
+	struct megasas_instance *local_instance;
+	int i, error = 0;
+	int crash_support;
+
+	crash_support = cmd->frame->dcmd.mbox.w[0];
+
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+		local_instance = megasas_mgmt_info.instance[i];
+		if (local_instance && local_instance->crash_dump_drv_support) {
+			if ((atomic_read(&local_instance->adprecovery) ==
+				MEGASAS_HBA_OPERATIONAL) &&
+				!megasas_set_crash_dump_params(local_instance,
+					crash_support)) {
+				local_instance->crash_dump_app_support =
+					crash_support;
+				dev_info(&local_instance->pdev->dev,
+					"Application firmware crash "
+					"dump mode set success\n");
+				error = 0;
+			} else {
+				dev_info(&local_instance->pdev->dev,
+					"Application firmware crash "
+					"dump mode set failed\n");
+				error = -1;
+			}
+		}
+	}
+	return error;
+}
+
+/**
+ * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
+ * @instance:			Adapter soft state
+ * @argp:			User's ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+		      struct megasas_iocpacket __user * user_ioc,
+		      struct megasas_iocpacket *ioc)
+{
+	struct megasas_sge32 *kern_sge32;
+	struct megasas_cmd *cmd;
+	void *kbuff_arr[MAX_IOCTL_SGE];
+	dma_addr_t buf_handle = 0;
+	int error = 0, i;
+	void *sense = NULL;
+	dma_addr_t sense_handle;
+	unsigned long *sense_ptr;
+	u32 opcode;
+
+	memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+	if (ioc->sge_count > MAX_IOCTL_SGE) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
+		       ioc->sge_count, MAX_IOCTL_SGE);
+		return -EINVAL;
+	}
+
+	cmd = megasas_get_cmd(instance);
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * User's IOCTL packet has 2 frames (maximum). Copy those two
+	 * frames into our cmd's frames. cmd->frame's context will get
+	 * overwritten when we copy from user's frames. So set that value
+	 * alone separately
+	 */
+	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
+	cmd->frame->hdr.pad_0 = 0;
+	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+					       MFI_FRAME_SGL64 |
+					       MFI_FRAME_SENSE64));
+	opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+
+	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
+			megasas_return_cmd(instance, cmd);
+			return -1;
+		}
+	}
+
+	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+		error = megasas_set_crash_dump_params_ioctl(cmd);
+		megasas_return_cmd(instance, cmd);
+		return error;
+	}
+
+	/*
+	 * The management interface between applications and the fw uses
+	 * MFI frames. E.g., RAID configuration changes, LD property changes,
+	 * etc. are accomplished through different kinds of MFI frames. The
+	 * driver needs to care only about substituting user buffers with
+	 * kernel buffers in SGLs. The location of SGL is embedded in the
+	 * struct iocpacket itself.
+	 */
+	kern_sge32 = (struct megasas_sge32 *)
+	    ((unsigned long)cmd->frame + ioc->sgl_off);
+
+	/*
+	 * For each user buffer, create a mirror buffer and copy in
+	 */
+	for (i = 0; i < ioc->sge_count; i++) {
+		if (!ioc->sgl[i].iov_len)
+			continue;
+
+		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
+						    ioc->sgl[i].iov_len,
+						    &buf_handle, GFP_KERNEL);
+		if (!kbuff_arr[i]) {
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
+			       "kernel SGL buffer for IOCTL\n");
+			error = -ENOMEM;
+			goto out;
+		}
+
+		/*
+		 * We don't change the dma_coherent_mask, so
+		 * dma_alloc_coherent only returns 32bit addresses
+		 */
+		kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+		kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+
+		/*
+		 * We created a kernel buffer corresponding to the
+		 * user buffer. Now copy in from the user buffer
+		 */
+		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+				   (u32) (ioc->sgl[i].iov_len))) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	if (ioc->sense_len) {
+		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
+					     &sense_handle, GFP_KERNEL);
+		if (!sense) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		sense_ptr =
+		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+		*sense_ptr = cpu_to_le32(sense_handle);
+	}
+
+	/*
+	 * Set the sync_cmd flag so that the ISR knows not to complete this
+	 * cmd to the SCSI mid-layer
+	 */
+	cmd->sync_cmd = 1;
+	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+		cmd->sync_cmd = 0;
+		dev_err(&instance->pdev->dev,
+			"return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+			__func__, __LINE__, opcode, cmd->cmd_status_drv);
+		return -EBUSY;
+	}
+
+	cmd->sync_cmd = 0;
+
+	if (instance->unload == 1) {
+		dev_info(&instance->pdev->dev,
+			"Driver unload is in progress, don't submit data to application\n");
+		goto out;
+	}
+	/*
+	 * copy out the kernel buffers to user buffers
+	 */
+	for (i = 0; i < ioc->sge_count; i++) {
+		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+				 ioc->sgl[i].iov_len)) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * copy out the sense
+	 */
+	if (ioc->sense_len) {
+		/*
+		 * sense_ptr points to the location that has the user
+		 * sense buffer address
+		 */
+		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+				ioc->sense_off);
+
+		if (copy_to_user((void __user *)((unsigned long)
+				 get_unaligned((unsigned long *)sense_ptr)),
+				 sense, ioc->sense_len)) {
+			dev_err(&instance->pdev->dev, "Failed to copy out to user "
+					"sense data\n");
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * copy the status codes returned by the fw
+	 */
+	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
+		error = -EFAULT;
+	}
+
+out:
+	if (sense) {
+		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
+				    sense, sense_handle);
+	}
+
+	for (i = 0; i < ioc->sge_count; i++) {
+		if (kbuff_arr[i]) {
+			dma_free_coherent(&instance->pdev->dev,
+					  le32_to_cpu(kern_sge32[i].length),
+					  kbuff_arr[i],
+					  le32_to_cpu(kern_sge32[i].phys_addr));
+			kbuff_arr[i] = NULL;
+		}
+	}
+
+	megasas_return_cmd(instance, cmd);
+	return error;
+}
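+
+/*
+ * Hedged user-space sketch of driving megasas_mgmt_fw_ioctl() (illustrative
+ * only; fd, buf and buf_len are assumptions, and real packet contents depend
+ * on the DCMD being issued). The caller describes its buffers in ioc.sgl[]
+ * and the code above mirrors each one into a DMA-coherent kernel buffer
+ * before firing the frame:
+ *
+ *	struct megasas_iocpacket ioc;
+ *
+ *	memset(&ioc, 0, sizeof(ioc));
+ *	ioc.host_no = 0;			// SCSI host to target
+ *	ioc.sge_count = 1;
+ *	ioc.sgl_off = offsetof(struct megasas_dcmd_frame, sgl);
+ *	ioc.sgl[0].iov_base = buf;		// user buffer, mirrored in-kernel
+ *	ioc.sgl[0].iov_len = buf_len;
+ *	// ... fill ioc.frame.dcmd with the MFI DCMD to issue ...
+ *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);	// blocks until FW completes
+ */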
+
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+	struct megasas_iocpacket __user *user_ioc =
+	    (struct megasas_iocpacket __user *)arg;
+	struct megasas_iocpacket *ioc;
+	struct megasas_instance *instance;
+	int error;
+	int i;
+	unsigned long flags;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+	ioc = memdup_user(user_ioc, sizeof(*ioc));
+	if (IS_ERR(ioc))
+		return PTR_ERR(ioc);
+
+	instance = megasas_lookup_instance(ioc->host_no);
+	if (!instance) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	/* Adjust ioctl wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Block ioctls in VF mode */
+	if (instance->requestorId && !allow_vf_ioctls) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_err(&instance->pdev->dev, "Controller in crit error\n");
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (instance->unload == 1) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (down_interruptible(&instance->ioctl_sem)) {
+		error = -ERESTARTSYS;
+		goto out_kfree_ioc;
+	}
+
+	for (i = 0; i < wait_time; i++) {
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			dev_notice(&instance->pdev->dev,
+				"waiting for controller reset to finish\n");
+		}
+
+		msleep(1000);
+	}
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+		error = -ENODEV;
+		goto out_up;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+out_up:
+	up(&instance->ioctl_sem);
+
+out_kfree_ioc:
+	kfree(ioc);
+	return error;
+}
+
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+	struct megasas_instance *instance;
+	struct megasas_aen aen;
+	int error;
+	int i;
+	unsigned long flags;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+	if (file->private_data != file) {
+		printk(KERN_DEBUG "megasas: fasync_helper was not "
+		       "called first\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+		return -EFAULT;
+
+	instance = megasas_lookup_instance(aen.host_no);
+
+	if (!instance)
+		return -ENODEV;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return -ENODEV;
+
+	if (instance->unload == 1)
+		return -ENODEV;
+
+	for (i = 0; i < wait_time; i++) {
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
+			spin_unlock_irqrestore(&instance->hba_lock,
+						flags);
+			break;
+		}
+
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			dev_notice(&instance->pdev->dev,
+				"waiting for controller reset to finish\n");
+		}
+
+		msleep(1000);
+	}
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+		return -ENODEV;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	mutex_lock(&instance->reset_mutex);
+	error = megasas_register_aen(instance, aen.seq_num,
+				     aen.class_locale_word);
+	mutex_unlock(&instance->reset_mutex);
+	return error;
+}
+
+/**
+ * megasas_mgmt_ioctl -	char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case MEGASAS_IOC_FIRMWARE:
+		return megasas_mgmt_ioctl_fw(file, arg);
+
+	case MEGASAS_IOC_GET_AEN:
+		return megasas_mgmt_ioctl_aen(file, arg);
+	}
+
+	return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+	struct compat_megasas_iocpacket __user *cioc =
+	    (struct compat_megasas_iocpacket __user *)arg;
+	struct megasas_iocpacket __user *ioc =
+	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+	int i;
+	int error = 0;
+	compat_uptr_t ptr;
+	u32 local_sense_off;
+	u32 local_sense_len;
+	u32 user_sense_off;
+
+	if (clear_user(ioc, sizeof(*ioc)))
+		return -EFAULT;
+
+	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+		return -EFAULT;
+
+	/*
+	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+	 * sense_len is not null, so prepare the 64bit value under
+	 * the same condition.
+	 */
+	if (get_user(local_sense_off, &ioc->sense_off) ||
+		get_user(local_sense_len, &ioc->sense_len) ||
+		get_user(user_sense_off, &cioc->sense_off))
+		return -EFAULT;
+
+	if (local_sense_off != user_sense_off)
+		return -EINVAL;
+
+	if (local_sense_len) {
+		void __user **sense_ioc_ptr =
+			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
+		compat_uptr_t *sense_cioc_ptr =
+			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
+		if (get_user(ptr, sense_cioc_ptr) ||
+		    put_user(compat_ptr(ptr), sense_ioc_ptr))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_IOCTL_SGE; i++) {
+		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+		    copy_in_user(&ioc->sgl[i].iov_len,
+				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+			return -EFAULT;
+	}
+
+	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+	if (copy_in_user(&cioc->frame.hdr.cmd_status,
+			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+		return -EFAULT;
+	}
+	return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	switch (cmd) {
+	case MEGASAS_IOC_FIRMWARE32:
+		return megasas_mgmt_compat_ioctl_fw(file, arg);
+	case MEGASAS_IOC_GET_AEN:
+		return megasas_mgmt_ioctl_aen(file, arg);
+	}
+
+	return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static const struct file_operations megasas_mgmt_fops = {
+	.owner = THIS_MODULE,
+	.open = megasas_mgmt_open,
+	.fasync = megasas_mgmt_fasync,
+	.unlocked_ioctl = megasas_mgmt_ioctl,
+	.poll = megasas_mgmt_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+	.name = "megaraid_sas",
+	.id_table = megasas_pci_table,
+	.probe = megasas_probe_one,
+	.remove = megasas_detach_one,
+	.suspend = megasas_suspend,
+	.resume = megasas_resume,
+	.shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t version_show(struct device_driver *dd, char *buf)
+{
+	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+			MEGASAS_VERSION);
+}
+static DRIVER_ATTR_RO(version);
+
+static ssize_t release_date_show(struct device_driver *dd, char *buf)
+{
+	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
+		MEGASAS_RELDATE);
+}
+static DRIVER_ATTR_RO(release_date);
+
+static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_poll_for_event);
+}
+static DRIVER_ATTR_RO(support_poll_for_event);
+
+static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_device_change);
+}
+static DRIVER_ATTR_RO(support_device_change);
+
+static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", megasas_dbg_lvl);
+}
+
+static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
+			     size_t count)
+{
+	int retval = count;
+
+	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
+		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+static DRIVER_ATTR_RW(dbg_lvl);
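+
+/*
+ * Illustrative shell usage for the driver attributes above (the paths
+ * assume the standard sysfs location for PCI driver attributes):
+ *
+ *	# cat /sys/bus/pci/drivers/megaraid_sas/version
+ *	# echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
+ */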
+
+static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
+{
+	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
+	scsi_remove_device(sdev);
+	scsi_device_put(sdev);
+}
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+	struct megasas_aen_event *ev =
+		container_of(work, struct megasas_aen_event, hotplug_work.work);
+	struct megasas_instance *instance = ev->instance;
+	union megasas_evt_class_locale class_locale;
+	struct  Scsi_Host *host;
+	struct  scsi_device *sdev1;
+	u16     pd_index = 0;
+	u16	ld_index = 0;
+	int     i, j, doscan = 0;
+	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+	int error;
+	u8  dcmd_ret = DCMD_SUCCESS;
+
+	if (!instance) {
+		printk(KERN_ERR "invalid instance!\n");
+		kfree(ev);
+		return;
+	}
+
+	/* Adjust event workqueue thread wait time for VF mode */
+	if (instance->requestorId)
+		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+	/* Don't run the event workqueue thread if OCR is running */
+	mutex_lock(&instance->reset_mutex);
+
+	instance->ev = NULL;
+	host = instance->host;
+	if (instance->evt_detail) {
+		megasas_decode_evt(instance);
+
+		switch (le32_to_cpu(instance->evt_detail->code)) {
+
+		case MR_EVT_PD_INSERTED:
+		case MR_EVT_PD_REMOVED:
+			dcmd_ret = megasas_get_pd_list(instance);
+			if (dcmd_ret == DCMD_SUCCESS)
+				doscan = SCAN_PD_CHANNEL;
+			break;
+
+		case MR_EVT_LD_OFFLINE:
+		case MR_EVT_CFG_CLEARED:
+		case MR_EVT_LD_DELETED:
+		case MR_EVT_LD_CREATED:
+			if (!instance->requestorId ||
+				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+			if (dcmd_ret == DCMD_SUCCESS)
+				doscan = SCAN_VD_CHANNEL;
+
+			break;
+
+		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+		case MR_EVT_FOREIGN_CFG_IMPORTED:
+		case MR_EVT_LD_STATE_CHANGE:
+			dcmd_ret = megasas_get_pd_list(instance);
+
+			if (dcmd_ret != DCMD_SUCCESS)
+				break;
+
+			if (!instance->requestorId ||
+				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+			if (dcmd_ret != DCMD_SUCCESS)
+				break;
+
+			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+				instance->host->host_no);
+			break;
+
+		case MR_EVT_CTRL_PROP_CHANGED:
+				dcmd_ret = megasas_get_ctrl_info(instance);
+				break;
+		default:
+			doscan = 0;
+			break;
+		}
+	} else {
+		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+		mutex_unlock(&instance->reset_mutex);
+		kfree(ev);
+		return;
+	}
+
+	mutex_unlock(&instance->reset_mutex);
+
+	if (doscan & SCAN_PD_CHANNEL) {
+		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+				sdev1 = scsi_device_lookup(host, i, j, 0);
+				if (instance->pd_list[pd_index].driveState ==
+							MR_PD_STATE_SYSTEM) {
+					if (!sdev1)
+						scsi_add_device(host, i, j, 0);
+					else
+						scsi_device_put(sdev1);
+				} else {
+					if (sdev1)
+						megasas_remove_scsi_device(sdev1);
+				}
+			}
+		}
+	}
+
+	if (doscan & SCAN_VD_CHANNEL) {
+		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (!sdev1)
+						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+					else
+						scsi_device_put(sdev1);
+				} else {
+					if (sdev1)
+						megasas_remove_scsi_device(sdev1);
+				}
+			}
+		}
+	}
+
+	if (dcmd_ret == DCMD_SUCCESS)
+		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+	else
+		seq_num = instance->last_seq_num;
+
+	/* Register AEN with FW for latest sequence number plus 1 */
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	if (instance->aen_cmd != NULL) {
+		kfree(ev);
+		return;
+	}
+
+	mutex_lock(&instance->reset_mutex);
+	error = megasas_register_aen(instance, seq_num,
+					class_locale.word);
+	if (error)
+		dev_err(&instance->pdev->dev,
+			"register aen failed error %x\n", error);
+
+	mutex_unlock(&instance->reset_mutex);
+	kfree(ev);
+}
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+	int rval;
+
+	/*
+	 * When booted in a kdump kernel, minimize the memory footprint
+	 * by disabling a few features
+	 */
+	if (reset_devices) {
+		msix_vectors = 1;
+		rdpq_enable = 0;
+		dual_qdepth_disable = 1;
+	}
+
+	/*
+	 * Announce driver version and other information
+	 */
+	pr_info("megasas: %s\n", MEGASAS_VERSION);
+
+	spin_lock_init(&poll_aen_lock);
+
+	support_poll_for_event = 2;
+	support_device_change = 1;
+
+	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+	/*
+	 * Register character device node
+	 */
+	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+	if (rval < 0) {
+		printk(KERN_DEBUG "megasas: failed to register device node\n");
+		return rval;
+	}
+
+	megasas_mgmt_majorno = rval;
+
+	/*
+	 * Register ourselves as PCI hotplug module
+	 */
+	rval = pci_register_driver(&megasas_pci_driver);
+
+	if (rval) {
+		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
+		goto err_pcidrv;
+	}
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_version);
+	if (rval)
+		goto err_dcf_attr_ver;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_release_date);
+	if (rval)
+		goto err_dcf_rel_date;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_poll_for_event);
+	if (rval)
+		goto err_dcf_support_poll_for_event;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_dbg_lvl);
+	if (rval)
+		goto err_dcf_dbg_lvl;
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_device_change);
+	if (rval)
+		goto err_dcf_support_device_change;
+
+	return rval;
+
+err_dcf_support_device_change:
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_dbg_lvl);
+err_dcf_dbg_lvl:
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+err_dcf_support_poll_for_event:
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_release_date);
+err_dcf_rel_date:
+	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+err_dcf_attr_ver:
+	pci_unregister_driver(&megasas_pci_driver);
+err_pcidrv:
+	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+	return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_dbg_lvl);
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_device_change);
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_release_date);
+	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+
+	pci_unregister_driver(&megasas_pci_driver);
+	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fp.c b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 0000000..9a2a62e
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,1361 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2013  LSI Corporation
+ *  Copyright (c) 2013-2014  Avago Technologies
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  FILE: megaraid_sas_fp.c
+ *
+ *  Authors: Avago Technologies
+ *           Sumant Patro
+ *           Varad Talamacki
+ *           Manoj Jose
+ *           Kashyap Desai <kashyap.desai@avagotech.com>
+ *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ *  San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+#include <asm/div64.h>
+
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+	"threshold. Valid Values are 1-128. Default: 4");
+
+
+#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define MR_LD_STATE_OPTIMAL 3
+
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
+#define SPAN_INVALID  0xff
+
+/* Prototypes */
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
+
+u32 mega_mod64(u64 dividend, u32 divisor)
+{
+	u64 d;
+	u32 remainder;
+
+	if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
+	d = dividend;
+	remainder = do_div(d, divisor);
+	return remainder;
+}
+
+/**
+ * mega_div64_32 - 64-bit division with a 32-bit divisor
+ * @dividend:	Dividend
+ * @divisor:	Divisor
+ *
+ * @return quotient
+ **/
+u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+{
+	u32 remainder;
+	u64 d;
+
+	if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero in div fn\n");
+
+	d = dividend;
+	remainder = do_div(d, divisor);
+
+	return d;
+}
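+
+/*
+ * Worked example for the two helpers above: do_div() divides in place and
+ * hands back the remainder, so mega_div64_32(100, 7) returns the quotient
+ * 14 while mega_mod64(100, 7) returns the remainder 2.
+ */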
+
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return &map->raidMap.ldSpanMap[ld].ldRaid;
+}
+
+static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
+						   struct MR_DRV_RAID_MAP_ALL
+						   *map)
+{
+	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
+}
+
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
+}
+
+__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return map->raidMap.devHndlInfo[pd].interfaceType;
+}
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+					  struct MR_DRV_RAID_MAP_ALL *map)
+{
+	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+/*
+ * This function populates the driver RAID map using the firmware RAID map
+ */
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
+	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
+	int i, j;
+	u16 ld_count;
+	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
+	struct MR_RAID_MAP_DESC_TABLE *desc_table;
+
+	struct MR_DRV_RAID_MAP_ALL *drv_map =
+			fusion->ld_drv_map[(instance->map_id & 1)];
+	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+	void *raid_map_data = NULL;
+
+	memset(drv_map, 0, fusion->drv_map_sz);
+	memset(pDrvRaidMap->ldTgtIdToLd,
+	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+
+	if (instance->max_raid_mapsize) {
+		fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+		desc_table =
+		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
+		if (desc_table != fw_map_dyn->raid_map_desc_table)
+			dev_dbg(&instance->pdev->dev, "offsets of desc table do not match: desc %p original %p\n",
+				desc_table, fw_map_dyn->raid_map_desc_table);
+
+		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
+		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+		pDrvRaidMap->fpPdIoTimeoutSec =
+			fw_map_dyn->fp_pd_io_timeout_sec;
+		pDrvRaidMap->totalSize =
+			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
+		/* point to actual data starting point*/
+		raid_map_data = (void *)fw_map_dyn +
+			le32_to_cpu(fw_map_dyn->desc_table_offset) +
+			le32_to_cpu(fw_map_dyn->desc_table_size);
+
+		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
+			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
+			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+				fw_map_dyn->dev_hndl_info =
+				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+				memcpy(pDrvRaidMap->devHndlInfo,
+					fw_map_dyn->dev_hndl_info,
+					sizeof(struct MR_DEV_HANDLE_INFO) *
+					le32_to_cpu(desc_table->raid_map_desc_elements));
+			break;
+			case RAID_MAP_DESC_TYPE_TGTID_INFO:
+				fw_map_dyn->ld_tgt_id_to_ld =
+					(u16 *)(raid_map_data +
+					le32_to_cpu(desc_table->raid_map_desc_offset));
+				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+					pDrvRaidMap->ldTgtIdToLd[j] =
+						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
+				}
+			break;
+			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+				fw_map_dyn->ar_map_info =
+					(struct MR_ARRAY_INFO *)
+					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+				memcpy(pDrvRaidMap->arMapInfo,
+				       fw_map_dyn->ar_map_info,
+				       sizeof(struct MR_ARRAY_INFO) *
+				       le32_to_cpu(desc_table->raid_map_desc_elements));
+			break;
+			case RAID_MAP_DESC_TYPE_SPAN_INFO:
+				fw_map_dyn->ld_span_map =
+					(struct MR_LD_SPAN_MAP *)
+					(raid_map_data +
+					le32_to_cpu(desc_table->raid_map_desc_offset));
+				memcpy(pDrvRaidMap->ldSpanMap,
+				       fw_map_dyn->ld_span_map,
+				       sizeof(struct MR_LD_SPAN_MAP) *
+				       le32_to_cpu(desc_table->raid_map_desc_elements));
+			break;
+			default:
+				dev_dbg(&instance->pdev->dev, "wrong number of desc table elements %d\n",
+					le32_to_cpu(fw_map_dyn->desc_table_num_elements));
+			}
+			++desc_table;
+		}
+
+	} else if (instance->supportmax256vd) {
+		fw_map_ext =
+			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
+			return 1;
+		}
+
+		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
+			pDrvRaidMap->ldTgtIdToLd[i] =
+				(u16)fw_map_ext->ldTgtIdToLd[i];
+		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
+		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+		       sizeof(struct MR_DEV_HANDLE_INFO) *
+		       MAX_RAIDMAP_PHYSICAL_DEVICES);
+
+		/* New Raid map will not set totalSize, so keep expected value
+		 * for legacy code in ValidateMapInfo
+		 */
+		pDrvRaidMap->totalSize =
+			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
+	} else {
+		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+			fusion->ld_map[(instance->map_id & 1)];
+		pFwRaidMap = &fw_map_old->raidMap;
+		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+		if (ld_count > MAX_LOGICAL_DRIVES) {
+			dev_dbg(&instance->pdev->dev,
+				"LD count exposed in RAID map is not valid\n");
+			return 1;
+		}
+
+		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+			pDrvRaidMap->ldTgtIdToLd[i] =
+				(u8)pFwRaidMap->ldTgtIdToLd[i];
+		for (i = 0; i < ld_count; i++)
+			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+			sizeof(struct MR_DEV_HANDLE_INFO) *
+			MAX_RAIDMAP_PHYSICAL_DEVICES);
+	}
+
+	return 0;
+}
+
+/*
+ * This function validates the map info data provided by the FW
+ */
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+	struct MR_DRV_RAID_MAP_ALL *drv_map;
+	struct MR_DRV_RAID_MAP *pDrvRaidMap;
+	struct LD_LOAD_BALANCE_INFO *lbInfo;
+	PLD_SPAN_INFO ldSpanInfo;
+	struct MR_LD_RAID         *raid;
+	u16 num_lds, i;
+	u16 ld;
+	u32 expected_size;
+
+	if (MR_PopulateDrvRaidMap(instance))
+		return 0;
+
+	fusion = instance->ctrl_context;
+	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+	pDrvRaidMap = &drv_map->raidMap;
+
+	lbInfo = fusion->load_balance_info;
+	ldSpanInfo = fusion->log_to_span;
+
+	if (instance->max_raid_mapsize)
+		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
+	else if (instance->supportmax256vd)
+		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+	else
+		expected_size =
+			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+
+	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
+			le32_to_cpu(pDrvRaidMap->totalSize));
+		dev_dbg(&instance->pdev->dev, "does not match expected size 0x%x\n",
+			(unsigned int)expected_size);
+		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
+			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+			le32_to_cpu(pDrvRaidMap->totalSize));
+		return 0;
+	}
+
+	if (instance->UnevenSpanSupport)
+		mr_update_span_set(drv_map, ldSpanInfo);
+
+	if (lbInfo)
+		mr_update_load_balance_params(drv_map, lbInfo);
+
+	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+
+	/* Convert RAID capability values to CPU byte order */
+	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
+		ld = MR_TargetIdToLdGet(i, drv_map);
+
+		/* For non-existing VDs, iterate to next VD */
+		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+			continue;
+
+		raid = MR_LdRaidGet(ld, drv_map);
+		le32_to_cpus((u32 *)&raid->capability);
+
+		num_lds--;
+	}
+
+	return 1;
+}
+
+u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+		    struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+	struct MR_QUAD_ELEMENT    *quad;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	u32                span, j;
+
+	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+
+		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
+			quad = &pSpanBlock->block_span_info.quad[j];
+
+			if (le32_to_cpu(quad->diff) == 0)
+				return SPAN_INVALID;
+			if (le64_to_cpu(quad->logStart) <= row && row <=
+				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+				le32_to_cpu(quad->diff))) == 0) {
+				if (span_blk != NULL) {
+					u64  blk, debugBlk;
+					blk =  mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
+					debugBlk = blk;
+
+					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
+					*span_blk = blk;
+				}
+				return span;
+			}
+		}
+	}
+	return SPAN_INVALID;
+}
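+
+/*
+ * Worked example of the quad search above (illustrative values): with
+ * logStart = 0, logEnd = 100, diff = 2, offsetInSpan = 10 and
+ * stripeShift = 3, row 6 matches the quad since (6 - 0) % 2 == 0, giving
+ * blk = (6 - 0) / 2 + 10 = 13 and *span_blk = 13 << 3 = 104.
+ */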
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*    div_error	   - Divide error code.
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	u32    span, info;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].
+					block_span_info.quad[info];
+				if (le32_to_cpu(quad->diff) == 0)
+					return SPAN_INVALID;
+				if (le64_to_cpu(quad->logStart) <= row  &&
+					row <= le64_to_cpu(quad->logEnd)  &&
+					(mega_mod64(row - le64_to_cpu(quad->logStart),
+						le32_to_cpu(quad->diff))) == 0) {
+					if (span_blk != NULL) {
+						u64  blk;
+						blk = mega_div64_32
+						    ((row - le64_to_cpu(quad->logStart)),
+						    le32_to_cpu(quad->diff));
+						blk = (blk + le64_to_cpu(quad->offsetInSpan))
+							 << raid->stripeShift;
+						*span_blk = blk;
+					}
+					return span;
+				}
+			}
+	}
+	return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    Strip        - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    row         - row associated with strip
+*/
+
+static u64  get_row_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET	*span_set;
+	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
+	u32		info, strip_offset, span, span_offset;
+	u64		span_set_Strip, span_set_Row, retval;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		span_set_Strip = strip - span_set->data_strip_start;
+		strip_offset = mega_mod64(span_set_Strip,
+				span_set->span_row_data_width);
+		span_set_Row = mega_div64_32(span_set_Strip,
+				span_set->span_row_data_width) * span_set->diff;
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset++;
+				else
+					break;
+			}
+
+		retval = (span_set->data_row_start + span_set_Row +
+				(span_offset - 1));
+		return retval;
+	}
+	return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    row        - Row number
+*    map    - LD map
+*
+* Outputs :
+*
+*    Strip         - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	struct MR_QUAD_ELEMENT    *quad;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    span, info;
+	u64  strip;
+
+	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (row > span_set->data_row_end)
+			continue;
+
+		for (span = 0; span < raid->spanDepth; span++)
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.quad[info];
+				if (le64_to_cpu(quad->logStart) <= row  &&
+					row <= le64_to_cpu(quad->logEnd)  &&
+					mega_mod64((row - le64_to_cpu(quad->logStart)),
+					le32_to_cpu(quad->diff)) == 0) {
+					strip = mega_div64_32
+						(((row - span_set->data_row_start)
+							- le64_to_cpu(quad->logStart)),
+							le32_to_cpu(quad->diff));
+					strip *= span_set->span_row_data_width;
+					strip += span_set->data_strip_start;
+					strip += span_set->strip_offset[span];
+					return strip;
+				}
+			}
+	}
+	dev_err(&instance->pdev->dev,
+		"get_strip_from_row returns invalid strip for ld=%x, row=%lx\n",
+		ld, (unsigned long)row);
+	return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+*    instance - HBA instance
+*    ld   - Logical drive number
+*    strip      - Strip
+*    map    - LD map
+*
+* Outputs :
+*
+*    Phys Arm         - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	LD_SPAN_SET *span_set;
+	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+	u32    info, strip_offset, span, span_offset, retval;
+
+	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+		span_set = &(ldSpanInfo[ld].span_set[info]);
+
+		if (span_set->span_row_data_width == 0)
+			break;
+		if (strip > span_set->data_strip_end)
+			continue;
+
+		strip_offset = (uint)mega_mod64
+				((strip - span_set->data_strip_start),
+				span_set->span_row_data_width);
+
+		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
+				if (strip_offset >=
+					span_set->strip_offset[span])
+					span_offset =
+						span_set->strip_offset[span];
+				else
+					break;
+			}
+
+		retval = (strip_offset - span_offset);
+		return retval;
+	}
+
+	dev_err(&instance->pdev->dev,
+		"get_arm_from_strip returns invalid arm for ld=%x strip=%lx\n",
+		ld, (unsigned long)strip);
+
+	return -1;
+}
+
+/* This Function will return Phys arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+		struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	/* Need to check correct default value */
+	u32    arm = 0;
+
+	switch (raid->level) {
+	case 0:
+	case 5:
+	case 6:
+		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+		break;
+	case 1:
+		/* start with logical arm */
+		arm = get_arm_from_strip(instance, ld, stripe, map);
+		if (arm != -1U)
+			arm *= 2;
+		break;
+	}
+
+	return arm;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+*    ld   - Logical drive number
+*    stripRow        - Stripe number
+*    stripRef    - Reference in stripe
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u32     pd, arRef, r1_alt_pd;
+	u8      physArm, span;
+	u64     row;
+	u8	retval = true;
+	u64	*pdBlock = &io_info->pdBlock;
+	__le16	*pDevHandle = &io_info->devHandle;
+	u8	*pPdInterface = &io_info->pd_interface;
+	u32	logArm, rowMod, armQ, arm;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
+
+	/* Get row and span from io_info for Uneven Span IO. */
+	row	    = io_info->start_row;
+	span	    = io_info->start_span;
+
+	if (raid->level == 6) {
+		logArm = get_arm_from_strip(instance, ld, stripRow, map);
+		if (logArm == -1U)
+			return false;
+		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+		arm = armQ + 1 + logArm;
+		if (arm >= SPAN_ROW_SIZE(map, ld, span))
+			arm -= SPAN_ROW_SIZE(map, ld, span);
+		physArm = (u8)arm;
+	} else
+		/* Calculate the arm */
+		physArm = get_arm(instance, ld, span, stripRow, map);
+	if (physArm == 0xFF)
+		return false;
+
+	arRef       = MR_LdSpanArrayGet(ld, span, map);
+	pd          = MR_ArPdGet(arRef, physArm, map);
+
+	if (pd != MR_PD_INVALID) {
+		*pDevHandle = MR_PdDevHandleGet(pd, map);
+		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+		/* get second pd also for raid 1/10 fast path writes*/
+		if ((instance->adapter_type == VENTURA_SERIES) &&
+		    (raid->level == 1) &&
+		    !io_info->isRead) {
+			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (r1_alt_pd != MR_PD_INVALID)
+				io_info->r1_alt_dev_handle =
+				MR_PdDevHandleGet(r1_alt_pd, map);
+		}
+	} else {
+		if ((raid->level >= 5) &&
+			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
+			((instance->adapter_type == INVADER_SERIES) &&
+			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
+		else if (raid->level == 1) {
+			physArm = physArm + 1;
+			pd = MR_ArPdGet(arRef, physArm, map);
+			if (pd != MR_PD_INVALID) {
+				*pDevHandle = MR_PdDevHandleGet(pd, map);
+				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+			}
+		}
+	}
+
+	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+	if (instance->adapter_type == VENTURA_SERIES) {
+		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+	} else {
+		pRAID_Context->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm = pRAID_Context->span_arm;
+	}
+	io_info->pd_after_lb = pd;
+	return retval;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe.
+*
+* Inputs :
+*
+*    ld   - Logical drive number
+*    stripRow        - Stripe number
+*    stripRef    - Reference in stripe
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*/
+u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+		u16 stripRef, struct IO_REQUEST_INFO *io_info,
+		struct RAID_CONTEXT *pRAID_Context,
+		struct MR_DRV_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u32         pd, arRef, r1_alt_pd;
+	u8          physArm, span;
+	u64         row;
+	u8	    retval = true;
+	u64	    *pdBlock = &io_info->pdBlock;
+	__le16	    *pDevHandle = &io_info->devHandle;
+	u8	    *pPdInterface = &io_info->pd_interface;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
+
+	row =  mega_div64_32(stripRow, raid->rowDataSize);
+
+	if (raid->level == 6) {
+		/* logical arm within row */
+		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
+		u32 rowMod, armQ, arm;
+
+		if (raid->rowSize == 0)
+			return false;
+		/* get logical row mod */
+		rowMod = mega_mod64(row, raid->rowSize);
+		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
+		arm = armQ+1+logArm; /* data always logically follows Q */
+		if (arm >= raid->rowSize) /* handle wrap condition */
+			arm -= raid->rowSize;
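+		/*
+		 * Worked example (illustrative values): rowSize = 4, row = 5,
+		 * logArm = 2: rowMod = 5 % 4 = 1, armQ = 4 - 1 - 1 = 2,
+		 * arm = 2 + 1 + 2 = 5, which wraps to 5 - 4 = 1.
+		 */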
+		physArm = (u8)arm;
+	} else  {
+		if (raid->modFactor == 0)
+			return false;
+		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
+							  raid->modFactor),
+					  map);
+	}
+
+	if (raid->spanDepth == 1) {
+		span = 0;
+		*pdBlock = row << raid->stripeShift;
+	} else {
+		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+		if (span == SPAN_INVALID)
+			return false;
+	}
+
+	/* Get the array on which this span is present */
+	arRef       = MR_LdSpanArrayGet(ld, span, map);
+	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
+
+	if (pd != MR_PD_INVALID) {
+		/* Get dev handle from Pd. */
+		*pDevHandle = MR_PdDevHandleGet(pd, map);
+		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+		/* get second pd also for raid 1/10 fast path writes*/
+		if ((instance->adapter_type == VENTURA_SERIES) &&
+		    (raid->level == 1) &&
+		    !io_info->isRead) {
+			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (r1_alt_pd != MR_PD_INVALID)
+				io_info->r1_alt_dev_handle =
+					MR_PdDevHandleGet(r1_alt_pd, map);
+		}
+	} else {
+		if ((raid->level >= 5) &&
+			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
+			((instance->adapter_type == INVADER_SERIES) &&
+			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
+		else if (raid->level == 1) {
+			/* Get alternate Pd. */
+			physArm = physArm + 1;
+			pd = MR_ArPdGet(arRef, physArm, map);
+			if (pd != MR_PD_INVALID) {
+				/* Get dev handle from Pd */
+				*pDevHandle = MR_PdDevHandleGet(pd, map);
+				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+			}
+		}
+	}
+
+	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+	if (instance->adapter_type == VENTURA_SERIES) {
+		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm =
+				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+	} else {
+		pRAID_Context->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm = pRAID_Context->span_arm;
+	}
+	io_info->pd_after_lb = pd;
+	return retval;
+}
+
+/*
+******************************************************************************
+*
+* MR_BuildRaidContext function
+*
+* This function will initiate command processing. The start/end row and strip
+* information is calculated, then the region lock is acquired.
+* This function will return 0 if the region lock was acquired, OR return the
+* number of strips.
+*/
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+		    struct IO_REQUEST_INFO *io_info,
+		    struct RAID_CONTEXT *pRAID_Context,
+		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
+{
+	struct fusion_context *fusion;
+	struct MR_LD_RAID  *raid;
+	u32         stripSize, stripe_mask;
+	u64         endLba, endStrip, endRow, start_row, start_strip;
+	u64         regStart;
+	u32         regSize;
+	u8          num_strips, numRows;
+	u16         ref_in_start_stripe, ref_in_end_stripe;
+	u64         ldStartBlock;
+	u32         numBlocks, ldTgtId;
+	u8          isRead;
+	u8	    retval = 0;
+	u8	    startlba_span = SPAN_INVALID;
+	u64 *pdBlock = &io_info->pdBlock;
+	u16	    ld;
+
+	ldStartBlock = io_info->ldStartBlock;
+	numBlocks = io_info->numBlocks;
+	ldTgtId = io_info->ldTgtId;
+	isRead = io_info->isRead;
+	io_info->IoforUnevenSpan = 0;
+	io_info->start_span	= SPAN_INVALID;
+	fusion = instance->ctrl_context;
+
+	ld = MR_TargetIdToLdGet(ldTgtId, map);
+	raid = MR_LdRaidGet(ld, map);
+	/* check read-ahead bit */
+	io_info->ra_capable = raid->capability.ra_capable;
+
+	/*
+	 * If rowDataSize in the RAID map and spanRowDataSize in the SPAN INFO
+	 * are zero, return false
+	 */
+	if (raid->rowDataSize == 0) {
+		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+			return false;
+		else if (instance->UnevenSpanSupport) {
+			io_info->IoforUnevenSpan = 1;
+		} else {
+			dev_info(&instance->pdev->dev,
+				"raid->rowDataSize is 0, but SPAN[0] has rowDataSize = 0x%0x, and there is _NO_ UnevenSpanSupport\n",
+				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+			return false;
+		}
+	}
+
+	stripSize = 1 << raid->stripeShift;
+	stripe_mask = stripSize-1;
+
+	/*
+	 * calculate starting row and stripe, and number of strips and rows
+	 */
+	start_strip         = ldStartBlock >> raid->stripeShift;
+	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
+	endLba              = ldStartBlock + numBlocks - 1;
+	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
+	endStrip            = endLba >> raid->stripeShift;
+	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
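+	/*
+	 * Worked example (illustrative values): with stripeShift = 7
+	 * (128-block strips), ldStartBlock = 300 and numBlocks = 100:
+	 * start_strip = 300 >> 7 = 2, ref_in_start_stripe = 300 & 127 = 44,
+	 * endLba = 399, endStrip = 399 >> 7 = 3, num_strips = 3 - 2 + 1 = 2.
+	 */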
+
+	if (io_info->IoforUnevenSpan) {
+		start_row = get_row_from_strip(instance, ld, start_strip, map);
+		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
+		if (start_row == -1ULL || endRow == -1ULL) {
+			dev_info(&instance->pdev->dev,
+				"return from %s %d. Send IO w/o region lock.\n",
+				__func__, __LINE__);
+			return false;
+		}
+
+		if (raid->spanDepth == 1) {
+			startlba_span = 0;
+			*pdBlock = start_row << raid->stripeShift;
+		} else
+			startlba_span = (u8)mr_spanset_get_span_block(instance,
+						ld, start_row, pdBlock, map);
+		if (startlba_span == SPAN_INVALID) {
+			dev_info(&instance->pdev->dev,
+				"return from %s %d for row 0x%llx, start strip %llx, end strip %llx\n",
+				__func__, __LINE__,
+				(unsigned long long)start_row,
+				(unsigned long long)start_strip,
+				(unsigned long long)endStrip);
+			return false;
+		}
+		io_info->start_span	= startlba_span;
+		io_info->start_row	= start_row;
+	} else {
+		start_row = mega_div64_32(start_strip, raid->rowDataSize);
+		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
+	}
+	numRows = (u8)(endRow - start_row + 1);
+
+	/*
+	 * calculate region info.
+	 */
+
+	/* assume region is at the start of the first row */
+	regStart            = start_row << raid->stripeShift;
+	/* assume this IO needs the full row - we'll adjust if not true */
+	regSize             = stripSize;
+
+	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
+
+	/* Check if we can send this I/O via FastPath */
+	if (raid->capability.fpCapable) {
+		if (isRead)
+			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+					      ((num_strips == 1) ||
+					       raid->capability.
+					       fpReadAcrossStripe));
+		else
+			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+					      ((num_strips == 1) ||
+					       raid->capability.
+					       fpWriteAcrossStripe));
+	} else
+		io_info->fpOkForIo = false;
+
+	if (numRows == 1) {
+		/* single-strip IOs can always lock only the data needed */
+		if (num_strips == 1) {
+			regStart += ref_in_start_stripe;
+			regSize = numBlocks;
+		}
+		/* multi-strip IOs always need the full stripe locked */
+	} else if (io_info->IoforUnevenSpan == 0) {
+		/*
+		 * For Even span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
+		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+			regStart += ref_in_start_stripe;
+			/* initialize count to sectors from startref to end
+			   of strip */
+			regSize = stripSize - ref_in_start_stripe;
+		}
+
+		/* add complete rows in the middle of the transfer */
+		if (numRows > 2)
+			regSize += (numRows-2) << raid->stripeShift;
+
+		/* if IO ends within first strip of last row*/
+		if (endStrip == endRow*raid->rowDataSize)
+			regSize += ref_in_end_stripe+1;
+		else
+			regSize += stripSize;
+	} else {
+		/*
+		 * For Uneven span region lock optimization.
+		 * If the start strip is the last in the start row
+		 */
+		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+			regStart += ref_in_start_stripe;
+			/* initialize count to sectors from
+			 * startRef to end of strip
+			 */
+			regSize = stripSize - ref_in_start_stripe;
+		}
+
+		/* Add complete rows in the middle of the transfer */
+		if (numRows > 2)
+			regSize += (numRows-2) << raid->stripeShift;
+
+		/* if IO ends within first strip of last row */
+		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+			regSize += ref_in_end_stripe + 1;
+		else
+			regSize += stripSize;
+	}
+
+	pRAID_Context->timeout_value =
+		cpu_to_le16(raid->fpIoTimeoutForLd ?
+			    raid->fpIoTimeoutForLd :
+			    map->raidMap.fpPdIoTimeoutSec);
+	if (instance->adapter_type == INVADER_SERIES)
+		pRAID_Context->reg_lock_flags = (isRead) ?
+			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+	else if (instance->adapter_type == THUNDERBOLT_SERIES)
+		pRAID_Context->reg_lock_flags = (isRead) ?
+			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
+	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
+	pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
+	pRAID_Context->config_seq_num	= raid->seqNum;
+	/* save pointer to raid->LUN array */
+	*raidLUN = raid->LUN;
+
+	/*
+	 * Get Phy Params only if FP capable, or else leave it to MR firmware
+	 * to do the calculation.
+	 */
+	if (io_info->fpOkForIo) {
+		retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+					start_strip, ref_in_start_stripe,
+					io_info, pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld, start_strip,
+					ref_in_start_stripe, io_info,
+					pRAID_Context, map);
+		/* If IO on an invalid Pd, then FP is not possible.*/
+		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
+			io_info->fpOkForIo = false;
+		return retval;
+	} else if (isRead) {
+		uint stripIdx;
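+
+		/*
+		 * FastPath is off for the IO as a whole, but for reads we
+		 * still walk every strip and compute its physical
+		 * parameters, bailing out as soon as one strip fails to map.
+		 */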
+		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+			retval = io_info->IoforUnevenSpan ?
+				mr_spanset_get_phy_params(instance, ld,
+				    start_strip + stripIdx,
+				    ref_in_start_stripe, io_info,
+				    pRAID_Context, map) :
+				MR_GetPhyParams(instance, ld,
+				    start_strip + stripIdx, ref_in_start_stripe,
+				    io_info, pRAID_Context, map);
+			if (!retval)
+				return true;
+		}
+	}
+	return true;
+}
+
+/*
+******************************************************************************
+*
+* This routine prepares span set info from a valid RAID map and stores it
+* in the local copy of ldSpanInfo in the per-instance data structure.
+*
+* Inputs :
+* map    - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo)
+{
+	u8   span, count;
+	u32  element, span_row_width;
+	u64  span_row;
+	struct MR_LD_RAID *raid;
+	LD_SPAN_SET *span_set, *span_set_prev;
+	struct MR_QUAD_ELEMENT    *quad;
+	int ldCount;
+	u16 ld;
+
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+			continue;
+		raid = MR_LdRaidGet(ld, map);
+		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+			for (span = 0; span < raid->spanDepth; span++) {
+				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+					block_span_info.noElements) <
+					element + 1)
+					continue;
+				span_set = &(ldSpanInfo[ld].span_set[element]);
+				quad = &map->raidMap.ldSpanMap[ld].
+					spanBlock[span].block_span_info.
+					quad[element];
+
+				span_set->diff = le32_to_cpu(quad->diff);
+
+				for (count = 0, span_row_width = 0;
+					count < raid->spanDepth; count++) {
+					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
+						spanBlock[count].
+						block_span_info.
+						noElements) >= element + 1) {
+						span_set->strip_offset[count] =
+							span_row_width;
+						span_row_width +=
+							MR_LdSpanPtrGet
+							(ld, count, map)->spanRowDataSize;
+					}
+				}
+
+				span_set->span_row_data_width = span_row_width;
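+
+				/*
+				 * span_row: the number of span rows this
+				 * quad element describes, i.e.
+				 * ceil((logEnd - logStart + 1) / diff).
+				 */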
+				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+					le32_to_cpu(quad->diff));
+
+				if (element == 0) {
+					span_set->log_start_lba = 0;
+					span_set->log_end_lba =
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start = 0;
+					span_set->span_row_end = span_row - 1;
+
+					span_set->data_strip_start = 0;
+					span_set->data_strip_end =
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start = 0;
+					span_set->data_row_end =
+						(span_row * le32_to_cpu(quad->diff)) - 1;
+				} else {
+					span_set_prev = &(ldSpanInfo[ld].
+							span_set[element - 1]);
+					span_set->log_start_lba =
+						span_set_prev->log_end_lba + 1;
+					span_set->log_end_lba =
+						span_set->log_start_lba +
+						((span_row << raid->stripeShift)
+						* span_row_width) - 1;
+
+					span_set->span_row_start =
+						span_set_prev->span_row_end + 1;
+					span_set->span_row_end =
+					span_set->span_row_start + span_row - 1;
+
+					span_set->data_strip_start =
+					span_set_prev->data_strip_end + 1;
+					span_set->data_strip_end =
+						span_set->data_strip_start +
+						(span_row * span_row_width) - 1;
+
+					span_set->data_row_start =
+						span_set_prev->data_row_end + 1;
+					span_set->data_row_end =
+						span_set->data_row_start +
+						(span_row * le32_to_cpu(quad->diff)) - 1;
+				}
+				break;
+			}
+			if (span == raid->spanDepth)
+				break;
+		}
+	}
+}
+
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+	int ldCount;
+	u16 ld;
+	struct MR_LD_RAID *raid;
+
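+	/*
+	 * Clamp an out-of-range lb_pending_cmds (a tunable value) back to
+	 * its default so the load-balance heuristic stays sane.
+	 */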
+	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, drv_map);
+		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
+			lbInfo[ldCount].loadBalanceFlag = 0;
+			continue;
+		}
+
+		raid = MR_LdRaidGet(ld, drv_map);
+		if ((raid->level != 1) ||
+			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
+			lbInfo[ldCount].loadBalanceFlag = 0;
+			continue;
+		}
+		lbInfo[ldCount].loadBalanceFlag = 1;
+	}
+}
+
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+			   struct LD_LOAD_BALANCE_INFO *lbInfo,
+			   struct IO_REQUEST_INFO *io_info,
+			   struct MR_DRV_RAID_MAP_ALL *drv_map)
+{
+	struct MR_LD_RAID  *raid;
+	u16	pd1_dev_handle;
+	u16     pend0, pend1, ld;
+	u64     diff0, diff1;
+	u8      bestArm, pd0, pd1, span, arm;
+	u32     arRef, span_row_size;
+
+	u64 block = io_info->ldStartBlock;
+	u32 count = io_info->numBlocks;
+
+	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+			>> RAID_CTX_SPANARM_SPAN_SHIFT);
+	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+	raid = MR_LdRaidGet(ld, drv_map);
+	span_row_size = instance->UnevenSpanSupport ?
+			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+	pd0 = MR_ArPdGet(arRef, arm, drv_map);
+	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+		(arm + 1 - span_row_size) : arm + 1, drv_map);
+
+	/* Get PD1 Dev Handle */
+
+	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
+
+	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
+		bestArm = arm;
+	} else {
+		/* get the pending cmds for the data and mirror arms */
+		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+
+		/* Determine the disk whose head is nearer to the req. block */
+		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+		/* Make balance count from 16 to 4 to
+		 *  keep driver in sync with Firmware
+		 */
+		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
+		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+			bestArm ^= 1;
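+
+		/*
+		 * Example (hypothetical numbers): the data arm is nearer,
+		 * but it has pend0 = 9 commands outstanding while the
+		 * mirror arm has pend1 = 2; with lb_pending_cmds = 4,
+		 * 9 > 2 + 4, so the IO is steered to the mirror arm.
+		 */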
+
+		/* Update the last accessed block on the correct pd */
+		io_info->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+	}
+
+	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+	return io_info->pd_after_lb;
+}
+
+__le16 get_updated_dev_handle(struct megasas_instance *instance,
+			      struct LD_LOAD_BALANCE_INFO *lbInfo,
+			      struct IO_REQUEST_INFO *io_info,
+			      struct MR_DRV_RAID_MAP_ALL *drv_map)
+{
+	u8 arm_pd;
+	__le16 devHandle;
+
+	/* get best new arm (PD ID) */
+	arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
+	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
+	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
+
+	return devHandle;
+}
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.c b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 0000000..b137212
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,4555 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2013  LSI Corporation
+ *  Copyright (c) 2013-2014  Avago Technologies
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  FILE: megaraid_sas_fusion.c
+ *
+ *  Authors: Avago Technologies
+ *           Sumant Patro
+ *           Adam Radford
+ *           Kashyap Desai <kashyap.desai@avagotech.com>
+ *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ *  San Jose, California 95131
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/vmalloc.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
+#include <linux/dmi.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+
+extern void megasas_free_cmds(struct megasas_instance *instance);
+extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+					   *instance);
+extern void
+megasas_complete_cmd(struct megasas_instance *instance,
+		     struct megasas_cmd *cmd, u8 alt_status);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	      int seconds);
+
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+int
+megasas_issue_polled(struct megasas_instance *instance,
+		     struct megasas_cmd *cmd);
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+
+extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+				  int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			 void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
+extern unsigned int resetwaittime;
+extern unsigned int dual_qdepth_disable;
+static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
+static void megasas_free_reply_fusion(struct megasas_instance *instance);
+
+
+
+/**
+ * megasas_enable_intr_fusion -	Enables interrupts
+ * @instance:			Adapter soft state
+ */
+void
+megasas_enable_intr_fusion(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
+
+	instance->mask_interrupts = 0;
+	/* For Thunderbolt/Invader also clear intr on enable */
+	writel(~0, &regs->outbound_intr_status);
+	readl(&regs->outbound_intr_status);
+
+	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_fusion - Disables interrupts
+ * @instance:			 Adapter soft state
+ */
+void
+megasas_disable_intr_fusion(struct megasas_instance *instance)
+{
+	u32 mask = 0xFFFFFFFF;
+	u32 status;
+	struct megasas_register_set __iomem *regs;
+	regs = instance->reg_set;
+	instance->mask_interrupts = 1;
+
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	status = readl(&regs->outbound_intr_mask);
+}
+
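+/**
+ * megasas_clear_intr_fusion -	Check for and clear the interrupt
+ * @regs:			MFI register set
+ *
+ * Returns 0 if the interrupt is not ours, 1 otherwise.
+ */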
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & 1) {
+		writel(status, &regs->outbound_intr_status);
+		readl(&regs->outbound_intr_status);
+		return 1;
+	}
+	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+		return 0;
+
+	return 1;
+}
+
+/**
+ * megasas_get_cmd_fusion -	Get a command from the free pool
+ * @instance:		Adapter soft state
+ * @blk_tag:		Block layer tag, used as the index into cmd_list
+ *
+ * Returns the blk_tag-indexed mpt frame
+ */
+inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+						  *instance, u32 blk_tag)
+{
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+	return fusion->cmd_list[blk_tag];
+}
+
+/**
+ * megasas_return_cmd_fusion -	Return a cmd to free command pool
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be returned to free command pool
+ */
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+	struct megasas_cmd_fusion *cmd)
+{
+	cmd->scmd = NULL;
+	memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+	cmd->cmd_completed = false;
+}
+
+/**
+ * megasas_fire_cmd_fusion -	Sends command to the FW
+ * @instance:			Adapter soft state
+ * @req_desc:			64bit Request descriptor
+ *
+ * Perform PCI Write.
+ */
+
+static void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
+{
+#if defined(writeq) && defined(CONFIG_64BIT)
+	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
+		le32_to_cpu(req_desc->u.low));
+
+	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
+#else
+	unsigned long flags;
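+
+	/*
+	 * Without a 64-bit writeq(), the two 32-bit halves of the request
+	 * descriptor must be posted back to back; hba_lock prevents another
+	 * CPU from interleaving its own low/high pair between ours.
+	 */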
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel(le32_to_cpu(req_desc->u.low),
+		&instance->reg_set->inbound_low_queue_port);
+	writel(le32_to_cpu(req_desc->u.high),
+		&instance->reg_set->inbound_high_queue_port);
+	mmiowb();
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
+}
+
+/**
+ * megasas_fusion_update_can_queue -	Do all Adapter Queue depth related calculations here
+ * @instance:							Adapter soft state
+ * fw_boot_context:						Whether this function called during probe or after OCR
+ *
+ * This function is only for fusion controllers.
+ * Update host can queue, if firmware downgrade max supported firmware commands.
+ * Firmware upgrade case will be skiped because underlying firmware has
+ * more resource than exposed to the OS.
+ *
+ */
+static void
+megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
+{
+	u16 cur_max_fw_cmds = 0;
+	u16 ldio_threshold = 0;
+	struct megasas_register_set __iomem *reg_set;
+
+	reg_set = instance->reg_set;
+
+	/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+	if (instance->adapter_type < VENTURA_SERIES)
+		cur_max_fw_cmds =
+		readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+
+	if (dual_qdepth_disable || !cur_max_fw_cmds)
+		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+	else
+		ldio_threshold =
+			(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+
+	dev_info(&instance->pdev->dev,
+			"Current firmware maximum commands: %d\t LDIO threshold: %d\n",
+			cur_max_fw_cmds, ldio_threshold);
+
+	if (fw_boot_context == OCR_CONTEXT) {
+		cur_max_fw_cmds = cur_max_fw_cmds - 1;
+		if (cur_max_fw_cmds < instance->max_fw_cmds) {
+			instance->cur_can_queue =
+				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
+						MEGASAS_FUSION_IOCTL_CMDS);
+			instance->host->can_queue = instance->cur_can_queue;
+			instance->ldio_threshold = ldio_threshold;
+		}
+	} else {
+		instance->max_fw_cmds = cur_max_fw_cmds;
+		instance->ldio_threshold = ldio_threshold;
+
+		if (!instance->is_rdpq)
+			instance->max_fw_cmds =
+				min_t(u16, instance->max_fw_cmds, 1024);
+
+		if (reset_devices)
+			instance->max_fw_cmds = min(instance->max_fw_cmds,
+						(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
+		/*
+		 * Reduce the max supported cmds by 1. This is to ensure that
+		 * the reply_q_sz (1 more than the max cmd that the driver may
+		 * send) does not exceed the max cmds that the FW can support.
+		 */
+		instance->max_fw_cmds = instance->max_fw_cmds-1;
+
+		instance->max_scsi_cmds = instance->max_fw_cmds -
+				(MEGASAS_FUSION_INTERNAL_CMDS +
+				MEGASAS_FUSION_IOCTL_CMDS);
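+		/*
+		 * Example (hypothetical numbers): a FW reporting 1008
+		 * commands yields max_fw_cmds = 1007 after the reply-queue
+		 * reservation above, and with 8 internal + 3 ioctl frames
+		 * held back, can_queue becomes 996.
+		 */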
+		instance->cur_can_queue = instance->max_scsi_cmds;
+		instance->host->can_queue = instance->cur_can_queue;
+	}
+
+	if (instance->adapter_type == VENTURA_SERIES)
+		instance->max_mpt_cmds =
+		instance->max_fw_cmds * RAID_1_PEER_CMDS;
+	else
+		instance->max_mpt_cmds = instance->max_fw_cmds;
+}
+/**
+ * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
+ * @instance:		Adapter soft state
+ */
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct megasas_cmd_fusion *cmd;
+
+	/* SG, Sense */
+	for (i = 0; i < instance->max_mpt_cmds; i++) {
+		cmd = fusion->cmd_list[i];
+		if (cmd) {
+			if (cmd->sg_frame)
+				dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+				      cmd->sg_frame_phys_addr);
+			if (cmd->sense)
+				dma_pool_free(fusion->sense_dma_pool, cmd->sense,
+				      cmd->sense_phys_addr);
+		}
+	}
+
+	if (fusion->sg_dma_pool) {
+		dma_pool_destroy(fusion->sg_dma_pool);
+		fusion->sg_dma_pool = NULL;
+	}
+	if (fusion->sense_dma_pool) {
+		dma_pool_destroy(fusion->sense_dma_pool);
+		fusion->sense_dma_pool = NULL;
+	}
+
+
+	/* Reply Frame, Desc*/
+	if (instance->is_rdpq)
+		megasas_free_rdpq_fusion(instance);
+	else
+		megasas_free_reply_fusion(instance);
+
+	/* Request Frame, Desc*/
+	if (fusion->req_frames_desc)
+		dma_free_coherent(&instance->pdev->dev,
+			fusion->request_alloc_sz, fusion->req_frames_desc,
+			fusion->req_frames_desc_phys);
+	if (fusion->io_request_frames)
+		dma_pool_free(fusion->io_request_frames_pool,
+			fusion->io_request_frames,
+			fusion->io_request_frames_phys);
+	if (fusion->io_request_frames_pool) {
+		dma_pool_destroy(fusion->io_request_frames_pool);
+		fusion->io_request_frames_pool = NULL;
+	}
+
+
+	/* cmd_list */
+	for (i = 0; i < instance->max_mpt_cmds; i++)
+		kfree(fusion->cmd_list[i]);
+
+	kfree(fusion->cmd_list);
+}
+
+/**
+ * megasas_create_sg_sense_fusion -	Creates DMA pool for cmd frames
+ * @instance:			Adapter soft state
+ *
+ */
+static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
+{
+	int i;
+	u16 max_cmd;
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *cmd;
+
+	fusion = instance->ctrl_context;
+	max_cmd = instance->max_fw_cmds;
+
+
+	fusion->sg_dma_pool =
+			dma_pool_create("mr_sg", &instance->pdev->dev,
+				instance->max_chain_frame_sz,
+				MR_DEFAULT_NVME_PAGE_SIZE, 0);
+	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
+	fusion->sense_dma_pool =
+			dma_pool_create("mr_sense", &instance->pdev->dev,
+				SCSI_SENSE_BUFFERSIZE, 64, 0);
+
+	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate and attach a frame to each of the commands in cmd_list
+	 */
+	for (i = 0; i < max_cmd; i++) {
+		cmd = fusion->cmd_list[i];
+		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
+					GFP_KERNEL, &cmd->sg_frame_phys_addr);
+
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
+					GFP_KERNEL, &cmd->sense_phys_addr);
+		if (!cmd->sg_frame || !cmd->sense) {
+			dev_err(&instance->pdev->dev,
+				"Failed from %s %d\n",  __func__, __LINE__);
+			return -ENOMEM;
+		}
+	}
+
+	/* create sense buffer for the raid 1/10 fp */
+	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
+		cmd = fusion->cmd_list[i];
+		cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
+			GFP_KERNEL, &cmd->sense_phys_addr);
+		if (!cmd->sense) {
+			dev_err(&instance->pdev->dev,
+				"Failed from %s %d\n",  __func__, __LINE__);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+int
+megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
+{
+	u32 max_mpt_cmd, i, j;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	max_mpt_cmd = instance->max_mpt_cmds;
+
+	/*
+	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+	 * Allocate the dynamic array first and then allocate individual
+	 * commands.
+	 */
+	fusion->cmd_list =
+		kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
+			GFP_KERNEL);
+	if (!fusion->cmd_list) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < max_mpt_cmd; i++) {
+		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
+					      GFP_KERNEL);
+		if (!fusion->cmd_list[i]) {
+			for (j = 0; j < i; j++)
+				kfree(fusion->cmd_list[j]);
+			kfree(fusion->cmd_list);
+			dev_err(&instance->pdev->dev,
+				"Failed from %s %d\n",  __func__, __LINE__);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+int
+megasas_alloc_request_fusion(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	fusion->req_frames_desc =
+		dma_alloc_coherent(&instance->pdev->dev,
+			fusion->request_alloc_sz,
+			&fusion->req_frames_desc_phys, GFP_KERNEL);
+	if (!fusion->req_frames_desc) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	fusion->io_request_frames_pool =
+			dma_pool_create("mr_ioreq", &instance->pdev->dev,
+				fusion->io_frames_alloc_sz, 16, 0);
+
+	if (!fusion->io_request_frames_pool) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	fusion->io_request_frames =
+			dma_pool_alloc(fusion->io_request_frames_pool,
+				GFP_KERNEL, &fusion->io_request_frames_phys);
+	if (!fusion->io_request_frames) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int
+megasas_alloc_reply_fusion(struct megasas_instance *instance)
+{
+	int i, count;
+	struct fusion_context *fusion;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+	fusion = instance->ctrl_context;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+	fusion->reply_frames_desc_pool =
+			dma_pool_create("mr_reply", &instance->pdev->dev,
+				fusion->reply_alloc_sz * count, 16, 0);
+
+	if (!fusion->reply_frames_desc_pool) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	fusion->reply_frames_desc[0] =
+		dma_pool_alloc(fusion->reply_frames_desc_pool,
+			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
+	if (!fusion->reply_frames_desc[0]) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+	reply_desc = fusion->reply_frames_desc[0];
+	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
+		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+
+	/* This is not RDPQ mode, but the driver still populates the
+	 * reply_frames_desc array so the same msix index can be used in
+	 * the ISR path.
+	 */
+	for (i = 0; i < (count - 1); i++)
+		fusion->reply_frames_desc[i + 1] =
+			fusion->reply_frames_desc[i] +
+			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
+
+	return 0;
+}
+
+int
+megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
+{
+	int i, j, count;
+	struct fusion_context *fusion;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+	fusion = instance->ctrl_context;
+
+	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
+				sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+				&fusion->rdpq_phys);
+	if (!fusion->rdpq_virt) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	memset(fusion->rdpq_virt, 0,
+			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
+							 &instance->pdev->dev,
+							 fusion->reply_alloc_sz,
+							 16, 0);
+
+	if (!fusion->reply_frames_desc_pool) {
+		dev_err(&instance->pdev->dev,
+			"Failed from %s %d\n",  __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		fusion->reply_frames_desc[i] =
+				dma_pool_alloc(fusion->reply_frames_desc_pool,
+					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
+		if (!fusion->reply_frames_desc[i]) {
+			dev_err(&instance->pdev->dev,
+				"Failed from %s %d\n",  __func__, __LINE__);
+			return -ENOMEM;
+		}
+
+		fusion->rdpq_virt[i].RDPQBaseAddress =
+			cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+
+		reply_desc = fusion->reply_frames_desc[i];
+		for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+	}
+	return 0;
+}
+
+static void
+megasas_free_rdpq_fusion(struct megasas_instance *instance) {
+
+	int i;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
+		if (fusion->reply_frames_desc[i])
+			dma_pool_free(fusion->reply_frames_desc_pool,
+				fusion->reply_frames_desc[i],
+				fusion->reply_frames_desc_phys[i]);
+	}
+
+	if (fusion->reply_frames_desc_pool)
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+	if (fusion->rdpq_virt)
+		pci_free_consistent(instance->pdev,
+			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+			fusion->rdpq_virt, fusion->rdpq_phys);
+}
+
+static void
+megasas_free_reply_fusion(struct megasas_instance *instance) {
+
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	if (fusion->reply_frames_desc[0])
+		dma_pool_free(fusion->reply_frames_desc_pool,
+			fusion->reply_frames_desc[0],
+			fusion->reply_frames_desc_phys[0]);
+
+	if (fusion->reply_frames_desc_pool)
+		dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+}
+
+
+/**
+ * megasas_alloc_cmds_fusion -	Allocates the command packets
+ * @instance:		Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed
+ * In this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *cmd;
+	u32 offset;
+	dma_addr_t io_req_base_phys;
+	u8 *io_req_base;
+
+
+	fusion = instance->ctrl_context;
+
+	if (megasas_alloc_cmdlist_fusion(instance))
+		goto fail_exit;
+
+	if (megasas_alloc_request_fusion(instance))
+		goto fail_exit;
+
+	if (instance->is_rdpq) {
+		if (megasas_alloc_rdpq_fusion(instance))
+			goto fail_exit;
+	} else
+		if (megasas_alloc_reply_fusion(instance))
+			goto fail_exit;
+
+
+	/* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
+	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
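+
+	/*
+	 * SMID i + 1 therefore maps to the frame at offset
+	 * i * MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE from io_req_base,
+	 * matching the cmd->index assignment in the loop below.
+	 */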
+
+	/*
+	 * Add all the commands to command pool (fusion->cmd_pool)
+	 */
+
+	/* SMID 0 is reserved. Set SMID/index from 1 */
+	for (i = 0; i < instance->max_mpt_cmds; i++) {
+		cmd = fusion->cmd_list[i];
+		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
+		cmd->index = i + 1;
+		cmd->scmd = NULL;
+		cmd->sync_cmd_idx =
+		(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
+				(i - instance->max_scsi_cmds) :
+				(u32)ULONG_MAX; /* Set to Invalid */
+		cmd->instance = instance;
+		cmd->io_request =
+			(struct MPI2_RAID_SCSI_IO_REQUEST *)
+		  (io_req_base + offset);
+		memset(cmd->io_request, 0,
+		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+		cmd->io_request_phys_addr = io_req_base_phys + offset;
+		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+	}
+
+	if (megasas_create_sg_sense_fusion(instance))
+		goto fail_exit;
+
+	return 0;
+
+fail_exit:
+	megasas_free_cmds_fusion(instance);
+	return -ENOMEM;
+}
+
+/**
+ * wait_and_poll -	Issues a polling command
+ * @instance:			Adapter soft state
+ * @cmd:			Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+	int seconds)
+{
+	int i;
+	struct megasas_header *frame_hdr = &cmd->frame->hdr;
+	struct fusion_context *fusion;
+
+	u32 msecs = seconds * 1000;
+
+	fusion = instance->ctrl_context;
+	/*
+	 * Wait for cmd_status to change
+	 */
+	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
+		rmb();
+		msleep(20);
+	}
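+
+	/*
+	 * The loop above polled in 20 ms steps for up to "seconds" seconds;
+	 * a status still at MFI_STAT_INVALID_STATUS (0xFF) is treated as a
+	 * timeout below.
+	 */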
+
+	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
+		return DCMD_TIMEOUT;
+	else if (frame_hdr->cmd_status == MFI_STAT_OK)
+		return DCMD_SUCCESS;
+	else
+		return DCMD_FAILED;
+}
+
+/**
+ * megasas_ioc_init_fusion -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * Issues the IOC Init cmd
+ */
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance)
+{
+	struct megasas_init_frame *init_frame;
+	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
+	dma_addr_t	ioc_init_handle;
+	struct megasas_cmd *cmd;
+	u8 ret, cur_rdpq_mode;
+	struct fusion_context *fusion;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
+	int i;
+	struct megasas_header *frame_hdr;
+	const char *sys_info;
+	MFI_CAPABILITIES *drv_ops;
+	u32 scratch_pad_2;
+
+	fusion = instance->ctrl_context;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
+		ret = 1;
+		goto fail_get_cmd;
+	}
+
+	scratch_pad_2 = readl
+		(&instance->reg_set->outbound_scratch_pad_2);
+
+	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+
+	if (instance->is_rdpq && !cur_rdpq_mode) {
+		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
+			" from RDPQ mode to non RDPQ mode\n");
+		ret = 1;
+		goto fail_fw_init;
+	}
+
+	instance->fw_sync_cache_support = (scratch_pad_2 &
+		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+		 instance->fw_sync_cache_support ? "Yes" : "No");
+
+	IOCInitMessage =
+	  dma_alloc_coherent(&instance->pdev->dev,
+			     sizeof(struct MPI2_IOC_INIT_REQUEST),
+			     &ioc_init_handle, GFP_KERNEL);
+
+	if (!IOCInitMessage) {
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
+		       "IOCInitMessage\n");
+		ret = 1;
+		goto fail_fw_init;
+	}
+
+	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
+	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
+	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
+			cpu_to_le64(fusion->rdpq_phys) :
+			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
+	IOCInitMessage->MsgFlags = instance->is_rdpq ?
+			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
+	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+	IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
+	init_frame = (struct megasas_init_frame *)cmd->frame;
+	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+
+	frame_hdr = &cmd->frame->hdr;
+	frame_hdr->cmd_status = 0xFF;
+	frame_hdr->flags = cpu_to_le16(
+		le16_to_cpu(frame_hdr->flags) |
+		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+	init_frame->cmd	= MFI_CMD_INIT;
+	init_frame->cmd_status = 0xFF;
+
+	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
+
+	/* driver support Extended MSIX */
+	if (instance->adapter_type >= INVADER_SERIES)
+		drv_ops->mfi_capabilities.support_additional_msix = 1;
+	/* driver supports HA / Remote LUN over Fast Path interface */
+	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
+
+	drv_ops->mfi_capabilities.support_max_255lds = 1;
+	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
+	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
+
+	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
+		drv_ops->mfi_capabilities.support_ext_io_size = 1;
+
+	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
+	if (!dual_qdepth_disable)
+		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
+
+	drv_ops->mfi_capabilities.support_qd_throttling = 1;
+	drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+	/* Convert capability to LE32 */
+	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
+
+	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
+	if (instance->system_info_buf && sys_info) {
+		memcpy(instance->system_info_buf->systemId, sys_info,
+			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
+		instance->system_info_buf->systemIdLength =
+			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
+		init_frame->system_info_lo = instance->system_info_h;
+		init_frame->system_info_hi = 0;
+	}
+
+	init_frame->queue_info_new_phys_addr_hi =
+		cpu_to_le32(upper_32_bits(ioc_init_handle));
+	init_frame->queue_info_new_phys_addr_lo =
+		cpu_to_le32(lower_32_bits(ioc_init_handle));
+	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
+	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
+	req_desc.MFAIo.RequestFlags =
+		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+	/*
+	 * disable the intr before firing the init frame
+	 */
+	instance->instancet->disable_intr(instance);
+
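+	/*
+	 * Wait (up to ~10 s, in 20 ms steps) for the FW to clear the
+	 * doorbell busy bit before posting the IOC INIT descriptor.
+	 */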
+	for (i = 0; i < (10 * 1000); i += 20) {
+		if (readl(&instance->reg_set->doorbell) & 1)
+			msleep(20);
+		else
+			break;
+	}
+
+	megasas_fire_cmd_fusion(instance, &req_desc);
+
+	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+
+	frame_hdr = &cmd->frame->hdr;
+	if (frame_hdr->cmd_status != 0) {
+		ret = 1;
+		goto fail_fw_init;
+	}
+
+	return 0;
+
+fail_fw_init:
+	megasas_return_cmd(instance, cmd);
+	if (IOCInitMessage)
+		dma_free_coherent(&instance->pdev->dev,
+				  sizeof(struct MPI2_IOC_INIT_REQUEST),
+				  IOCInitMessage, ioc_init_handle);
+fail_get_cmd:
+	dev_err(&instance->pdev->dev,
+		"Init cmd return status FAILED for SCSI host %d\n",
+		instance->host->host_no);
+
+	return ret;
+}
+
+/**
+ * megasas_sync_pd_seq_num -	JBOD SEQ MAP
+ * @instance:		Adapter soft state
+ * @pend:		set to 1 if this is a pended jbod map command.
+ *
+ * Issue the JBOD map to the firmware. For a pended command, issue it
+ * and return. For the first instance of the jbod map, issue the
+ * command and wait for its completion.
+ */
+int
+megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
+	int ret = 0;
+	u32 pd_seq_map_sz;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+	dma_addr_t pd_seq_h;
+
+	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
+	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
+	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+			(sizeof(struct MR_PD_CFG_SEQ) *
+			(MAX_PHYSICAL_DEVICES - 1));
+
+	cmd = megasas_get_cmd(instance);
+	if (!cmd) {
+		dev_err(&instance->pdev->dev,
+			"Could not get mfi cmd. Fail from %s %d\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(pd_sync, 0, pd_seq_map_sz);
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+	if (pend) {
+		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+		instance->jbod_seq_cmd = cmd;
+		instance->instancet->issue_dcmd(instance, cmd);
+		return 0;
+	}
+
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+
+	/* Below code is only for non pended DCMD */
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
+		dev_warn(&instance->pdev->dev,
+			"driver supports max %d JBOD, but FW reports %d\n",
+			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
+		ret = -EINVAL;
+	}
+
+	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+		megaraid_sas_kill_hba(instance);
+
+	if (ret == DCMD_SUCCESS)
+		instance->pd_seq_map_id++;
+
+	megasas_return_cmd(instance, cmd);
+	return ret;
+}
+
+/*
+ * megasas_get_ld_map_info -	Returns FW's ld_map structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD map
+ * structure.  This information is mainly used to validate the RAID map
+ * and decide whether fast path IO can be used.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0]	- number of LDs being sync'd
+ * dcmd.mbox.b[1]	- 0 - complete command immediately.
+ *			- 1 - pend till config change
+ * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ *				uses extended struct MR_FW_RAID_MAP_EXT
+ */
+static int
+megasas_get_ld_map_info(struct megasas_instance *instance)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	void *ci;
+	dma_addr_t ci_h = 0;
+	u32 size_map_info;
+	struct fusion_context *fusion;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
+		return -ENOMEM;
+	}
+
+	fusion = instance->ctrl_context;
+
+	if (!fusion) {
+		megasas_return_cmd(instance, cmd);
+		return -ENXIO;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	size_map_info = fusion->current_map_sz;
+
+	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
+	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
+
+	if (!ci) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, fusion->max_map_sz);
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MFI_IO_TIMEOUT_SECS);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+		megaraid_sas_kill_hba(instance);
+
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+u8
+megasas_get_map_info(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	fusion->fast_path_io = 0;
+	if (!megasas_get_ld_map_info(instance)) {
+		if (MR_ValidateMapInfo(instance)) {
+			fusion->fast_path_io = 1;
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * megasas_sync_map_info -	Syncs the LD target map with the FW
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) that sends each LD's target id and
+ * sequence number to the FW; the command is left pended so the FW can
+ * complete it on a configuration change.
+ */
+int
+megasas_sync_map_info(struct megasas_instance *instance)
+{
+	int i;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	u16 num_lds;
+	u32 size_sync_info;
+	struct fusion_context *fusion;
+	struct MR_LD_TARGET_SYNC *ci = NULL;
+	struct MR_DRV_RAID_MAP_ALL *map;
+	struct MR_LD_RAID  *raid;
+	struct MR_LD_TARGET_SYNC *ld_sync;
+	dma_addr_t ci_h = 0;
+	u32 size_map_info;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
+		return -ENOMEM;
+	}
+
+	fusion = instance->ctrl_context;
+
+	if (!fusion) {
+		megasas_return_cmd(instance, cmd);
+		return 1;
+	}
+
+	map = fusion->ld_drv_map[instance->map_id & 1];
+
+	num_lds = le16_to_cpu(map->raidMap.ldCount);
+
+	dcmd = &cmd->frame->dcmd;
+
+	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) * num_lds;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	ci = (struct MR_LD_TARGET_SYNC *)
+	  fusion->ld_map[(instance->map_id - 1) & 1];
+	memset(ci, 0, fusion->max_map_sz);
+
+	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
+
+	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
+
+	for (i = 0; i < num_lds; i++, ld_sync++) {
+		raid = MR_LdRaidGet(i, map);
+		ld_sync->targetId = MR_GetLDTgtId(i, map);
+		ld_sync->seqNum = raid->seqNum;
+	}
+
+	size_map_info = fusion->current_map_sz;
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+	dcmd->mbox.b[0] = num_lds;
+	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+	instance->map_update_cmd = cmd;
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	return 0;
+}
+
+/*
+ * megasas_display_intel_branding - Display branding string
+ * @instance: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+megasas_display_intel_branding(struct megasas_instance *instance)
+{
+	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+		return;
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_INVADER:
+		switch (instance->pdev->subsystem_device) {
+		case MEGARAID_INTEL_RS3DC080_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3DC080_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3DC040_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3DC040_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3SC008_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3SC008_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3MC044_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3MC044_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	case PCI_DEVICE_ID_LSI_FURY:
+		switch (instance->pdev->subsystem_device) {
+		case MEGARAID_INTEL_RS3WC080_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3WC080_BRANDING);
+			break;
+		case MEGARAID_INTEL_RS3WC040_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RS3WC040_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	case PCI_DEVICE_ID_LSI_CUTLASS_52:
+	case PCI_DEVICE_ID_LSI_CUTLASS_53:
+		switch (instance->pdev->subsystem_device) {
+		case MEGARAID_INTEL_RMS3BC160_SSDID:
+			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+				instance->host->host_no,
+				MEGARAID_INTEL_RMS3BC160_BRANDING);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * megasas_allocate_raid_maps -	Allocate memory for RAID maps
+ * @instance:				Adapter soft state
+ *
+ * return:				0 on success
+ *					-ENOMEM on failure
+ */
+static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+	int i = 0;
+
+	fusion = instance->ctrl_context;
+
+	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = NULL;
+
+		fusion->ld_drv_map[i] = (void *)
+			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
+					 fusion->drv_map_pages);
+
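+		/*
+		 * Fall back to vzalloc() when contiguous pages are not
+		 * available; is_vmalloc_addr() in the error path picks
+		 * the matching free routine.
+		 */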
+		if (!fusion->ld_drv_map[i]) {
+			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
+
+			if (!fusion->ld_drv_map[i]) {
+				dev_err(&instance->pdev->dev,
+					"Could not allocate memory for local map"
+					" size requested: %d\n",
+					fusion->drv_map_sz);
+				goto ld_drv_map_alloc_fail;
+			}
+		}
+	}
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+						       fusion->max_map_sz,
+						       &fusion->ld_map_phys[i],
+						       GFP_KERNEL);
+		if (!fusion->ld_map[i]) {
+			dev_err(&instance->pdev->dev,
+				"Could not allocate memory for map info %s:%d\n",
+				__func__, __LINE__);
+			goto ld_map_alloc_fail;
+		}
+	}
+
+	return 0;
+
+ld_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_map[i])
+			dma_free_coherent(&instance->pdev->dev,
+					  fusion->max_map_sz,
+					  fusion->ld_map[i],
+					  fusion->ld_map_phys[i]);
+	}
+
+ld_drv_map_alloc_fail:
+	for (i = 0; i < 2; i++) {
+		if (fusion->ld_drv_map[i]) {
+			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
+				vfree(fusion->ld_drv_map[i]);
+			else
+				free_pages((ulong)fusion->ld_drv_map[i],
+					   fusion->drv_map_pages);
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * megasas_init_adapter_fusion -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+u32
+megasas_init_adapter_fusion(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *reg_set;
+	struct fusion_context *fusion;
+	u16 max_cmd;
+	u32 scratch_pad_2;
+	int i = 0, count;
+
+	fusion = instance->ctrl_context;
+
+	reg_set = instance->reg_set;
+
+	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
+
+	/*
+	 * Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
+	 */
+	instance->max_mfi_cmds =
+		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
+
+	max_cmd = instance->max_fw_cmds;
+
+	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
+
+	fusion->request_alloc_sz =
+	sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
+	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
+		*(fusion->reply_q_depth);
+	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+		* (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+
+	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
+	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+	 * the firmware supports an extended IO chain frame which is
+	 * 4 times larger than the legacy firmware's.
+	 * Legacy Firmware - Frame size is (8 * 128) = 1K
+	 * 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
+	 */
+	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+		instance->max_chain_frame_sz =
+			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
+	else
+		instance->max_chain_frame_sz =
+			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
+
+	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
+		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
+			instance->max_chain_frame_sz,
+			MEGASAS_CHAIN_FRAME_SZ_MIN);
+		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
+	}
+
+	fusion->max_sge_in_main_msg =
+		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+	fusion->max_sge_in_chain =
+		instance->max_chain_frame_sz
+			/ sizeof(union MPI2_SGE_IO_UNION);
+
+	instance->max_num_sge =
+		rounddown_pow_of_two(fusion->max_sge_in_main_msg
+			+ fusion->max_sge_in_chain - 2);
+
+	/* Used for pass thru MFI frame (DCMD) */
+	fusion->chain_offset_mfi_pthru =
+		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+	fusion->chain_offset_io_request =
+		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+		 sizeof(union MPI2_SGE_IO_UNION))/16;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+	for (i = 0 ; i < count; i++)
+		fusion->last_reply_idx[i] = 0;
+
+	/*
+	 * For fusion adapters, 3 commands for IOCTL and 8 commands
+	 * for driver's internal DCMDs.
+	 */
+	instance->max_scsi_cmds = instance->max_fw_cmds -
+				(MEGASAS_FUSION_INTERNAL_CMDS +
+				MEGASAS_FUSION_IOCTL_CMDS);
+	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+
+	/*
+	 * Allocate memory for descriptors
+	 * Create a pool of commands
+	 */
+	if (megasas_alloc_cmds(instance))
+		goto fail_alloc_mfi_cmds;
+	if (megasas_alloc_cmds_fusion(instance))
+		goto fail_alloc_cmds;
+
+	if (megasas_ioc_init_fusion(instance))
+		goto fail_ioc_init;
+
+	megasas_display_intel_branding(instance);
+	if (megasas_get_ctrl_info(instance)) {
+		dev_err(&instance->pdev->dev,
+			"Could not get controller info. Fail from %s %d\n",
+			__func__, __LINE__);
+		goto fail_ioc_init;
+	}
+
+	instance->flag_ieee = 1;
+	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
+	fusion->fast_path_io = 0;
+
+	if (megasas_allocate_raid_maps(instance))
+		goto fail_ioc_init;
+
+	if (!megasas_get_map_info(instance))
+		megasas_sync_map_info(instance);
+
+	return 0;
+
+fail_ioc_init:
+	megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+	megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
+	return 1;
+}
+
+/**
+ * map_cmd_status -	Maps FW cmd status to OS cmd status
+ * @fusion :		Fusion context
+ * @scmd :		Pointer to the SCSI command
+ * @status :		status of cmd returned by FW
+ * @ext_status :	ext status of cmd returned by FW
+ * @data_length :	actual number of bytes transferred
+ * @sense :		sense data returned by FW
+ */
+
+void
+map_cmd_status(struct fusion_context *fusion,
+		struct scsi_cmnd *scmd, u8 status, u8 ext_status,
+		u32 data_length, u8 *sense)
+{
+	u8 cmd_type;
+	int resid;
+
+	cmd_type = megasas_cmd_type(scmd);
+	switch (status) {
+
+	case MFI_STAT_OK:
+		scmd->result = DID_OK << 16;
+		break;
+
+	case MFI_STAT_SCSI_IO_FAILED:
+	case MFI_STAT_LD_INIT_IN_PROGRESS:
+		scmd->result = (DID_ERROR << 16) | ext_status;
+		break;
+
+	case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+		scmd->result = (DID_OK << 16) | ext_status;
+		if (ext_status == SAM_STAT_CHECK_CONDITION) {
+			memset(scmd->sense_buffer, 0,
+			       SCSI_SENSE_BUFFERSIZE);
+			memcpy(scmd->sense_buffer, sense,
+			       SCSI_SENSE_BUFFERSIZE);
+			scmd->result |= DRIVER_SENSE << 24;
+		}
+
+		/*
+		 * If the IO request is partially completed, then MR FW will
+		 * update the "io_request->DataLength" field with the actual
+		 * number of bytes transferred.  The driver then sets the
+		 * residual byte count in the SCSI command structure.
+		 */
+		resid = (scsi_bufflen(scmd) - data_length);
+		scsi_set_resid(scmd, resid);
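+		/*
+		 * e.g. a 4096-byte request of which the FW moved only
+		 * 1024 bytes reports a residual of 3072.
+		 */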
+
+		if (resid &&
+			((cmd_type == READ_WRITE_LDIO) ||
+			(cmd_type == READ_WRITE_SYSPDIO)))
+			scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
+				" requested/completed 0x%x/0x%x\n",
+				status, scsi_bufflen(scmd), data_length);
+		break;
+
+	case MFI_STAT_LD_OFFLINE:
+	case MFI_STAT_DEVICE_NOT_FOUND:
+		scmd->result = DID_BAD_TARGET << 16;
+		break;
+	case MFI_STAT_CONFIG_SEQ_MISMATCH:
+		scmd->result = DID_IMM_RETRY << 16;
+		break;
+	default:
+		scmd->result = DID_ERROR << 16;
+		break;
+	}
+}
+
+/**
+ * megasas_is_prp_possible -
+ * Checks if native NVMe PRPs can be built for the IO
+ *
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command from the mid-layer
+ * @sge_count:		scatter gather element count.
+ *
+ * Returns:		true: PRPs can be built
+ *			false: IEEE SGLs needs to be built
+ */
+static bool
+megasas_is_prp_possible(struct megasas_instance *instance,
+			struct scsi_cmnd *scmd, int sge_count)
+{
+	struct fusion_context *fusion;
+	int i;
+	u32 data_length = 0;
+	struct scatterlist *sg_scmd;
+	bool build_prp = false;
+	u32 mr_nvme_pg_size;
+
+	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+				MR_DEFAULT_NVME_PAGE_SIZE);
+	fusion = instance->ctrl_context;
+	data_length = scsi_bufflen(scmd);
+	sg_scmd = scsi_sglist(scmd);
+
+	/*
+	 * NVMe uses one PRP for each page (or part of a page).
+	 * Look at the data length:
+	 * - 4 pages or less: IEEE SGLs are OK
+	 * - more than 5 pages: build a native (PRP) SGL
+	 * - more than 4 but at most 5 pages: check the first SG entry;
+	 *   if its size is >= the residual beyond 4 pages, use IEEE,
+	 *   otherwise build a native (PRP) SGL
+	 */
+
+	if (data_length > (mr_nvme_pg_size * 5)) {
+		build_prp = true;
+	} else if ((data_length > (mr_nvme_pg_size * 4)) &&
+			(data_length <= (mr_nvme_pg_size * 5)))  {
+		/* check if 1st SG entry size is < residual beyond 4 pages */
+		if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
+			build_prp = true;
+	}
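+
+	/*
+	 * Example (assuming a 4K NVME page): an 18K transfer falls in the
+	 * "> 4 and <= 5 pages" band; with a first SGE of only 1K,
+	 * 1K < 18K - 16K = 2K, so PRPs are built.
+	 */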
+
+/*
+ * The code below detects gaps/holes in IO data buffers.
+ * What do holes/gaps mean?
+ * Any SGE except the first one in an SGL starts at a non NVME page size
+ * aligned address, OR any SGE except the last one in an SGL ends at a
+ * non NVME page size boundary.
+ *
+ * The driver has already informed the block layer of the boundary rules
+ * for bio merging at the NVME page size boundary by calling the kernel
+ * API blk_queue_virt_boundary() inside slave_config.
+ * There is still a possibility of IOs coming to the driver with holes
+ * because of IO merging done by the IO scheduler.
+ *
+ * With SCSI BLK MQ enabled, there will be no IOs with holes, as there is
+ * no IO scheduling and hence no IO merging.
+ *
+ * With SCSI BLK MQ disabled, the IO scheduler may attempt to merge IOs
+ * and then send IOs with holes.
+ *
+ * Though the driver can ask the block layer to disable IO merging by
+ * calling queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
+ * sdev->request_queue), the user may tune the sysfs parameter nomerges
+ * back to 0 or 1.
+ *
+ * If in the future IO scheduling is enabled with SCSI BLK MQ, this
+ * algorithm to detect holes will be required for the SCSI BLK MQ
+ * enabled case as well.
+ */
+	scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
+		if ((i != 0) && (i != (sge_count - 1))) {
+			if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
+			    mega_mod64(sg_dma_address(sg_scmd),
+				       mr_nvme_pg_size)) {
+				build_prp = false;
+				atomic_inc(&instance->sge_holes_type1);
+				break;
+			}
+		}
+
+		if ((sge_count > 1) && (i == 0)) {
+			if ((mega_mod64((sg_dma_address(sg_scmd) +
+					sg_dma_len(sg_scmd)),
+					mr_nvme_pg_size))) {
+				build_prp = false;
+				atomic_inc(&instance->sge_holes_type2);
+				break;
+			}
+		}
+
+		if ((sge_count > 1) && (i == (sge_count - 1))) {
+			if (mega_mod64(sg_dma_address(sg_scmd),
+				       mr_nvme_pg_size)) {
+				build_prp = false;
+				atomic_inc(&instance->sge_holes_type3);
+				break;
+			}
+		}
+	}
+
+	return build_prp;
+}
+
+/**
+ * megasas_make_prp_nvme -
+ * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
+ *
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command from the mid-layer
+ * @sgl_ptr:		SGL to be filled in
+ * @cmd:		Fusion command frame
+ * @sge_count:		scatter gather element count.
+ *
+ * Returns:		true: PRPs are built
+ *			false: IEEE SGLs needs to be built
+ */
+static bool
+megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
+		      struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+		      struct megasas_cmd_fusion *cmd, int sge_count)
+{
+	int sge_len, offset, num_prp_in_chain = 0;
+	struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
+	u64 *ptr_sgl;
+	dma_addr_t ptr_sgl_phys;
+	u64 sge_addr;
+	u32 page_mask, page_mask_result;
+	struct scatterlist *sg_scmd;
+	u32 first_prp_len;
+	bool build_prp = false;
+	int data_len = scsi_bufflen(scmd);
+	struct fusion_context *fusion;
+	u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+					MR_DEFAULT_NVME_PAGE_SIZE);
+
+	fusion = instance->ctrl_context;
+
+	build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
+
+	if (!build_prp)
+		return false;
+
+	/*
+	 * NVMe has a very convoluted PRP format.  One PRP is required
+	 * for each page or partial page.  The driver needs to split up
+	 * OS sg_list entries if they are longer than one page or cross
+	 * a page boundary.  The driver also has to insert a PRP list
+	 * pointer entry as the last entry in each physical page of the
+	 * PRP list.
+	 *
+	 * NOTE: The first PRP "entry" is actually placed in the first
+	 * SGL entry of the main message in IEEE 64 format.  The 2nd
+	 * entry in the main message is the chain element, and the rest
+	 * of the PRP entries are built in the contiguous PCIe buffer.
+	 */
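+	/*
+	 * Worked example (illustrative, assuming a 4 KiB NVMe page size
+	 * and a single contiguous SGE): a 12 KiB buffer at bus address
+	 * 0x10200 yields
+	 *   first PRP (IEEE SGE in the main msg): addr 0x10200, len 0xE00
+	 *   chain element -> PRP list in cmd->sg_frame:
+	 *     PRP[0] = 0x11000, PRP[1] = 0x12000, PRP[2] = 0x13000
+	 * Every PRP after the first points at a page-aligned address and
+	 * implicitly covers (at most) one NVMe page.
+	 */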
+	page_mask = mr_nvme_pg_size - 1;
+	ptr_sgl = (u64 *)cmd->sg_frame;
+	ptr_sgl_phys = cmd->sg_frame_phys_addr;
+	memset(ptr_sgl, 0, instance->max_chain_frame_sz);
+
+	/* Build the chain frame element which holds all PRPs except the first */
+	main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
+	    ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
+
+	main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
+	main_chain_element->NextChainOffset = 0;
+	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+					IEEE_SGE_FLAGS_SYSTEM_ADDR |
+					MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+	/* Build the first PRP; the SGE need not be page aligned */
+	ptr_first_sgl = sgl_ptr;
+	sg_scmd = scsi_sglist(scmd);
+	sge_addr = sg_dma_address(sg_scmd);
+	sge_len = sg_dma_len(sg_scmd);
+
+	offset = (u32)(sge_addr & page_mask);
+	first_prp_len = mr_nvme_pg_size - offset;
+
+	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
+	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
+
+	data_len -= first_prp_len;
+
+	if (sge_len > first_prp_len) {
+		sge_addr += first_prp_len;
+		sge_len -= first_prp_len;
+	} else if (sge_len == first_prp_len) {
+		sg_scmd = sg_next(sg_scmd);
+		sge_addr = sg_dma_address(sg_scmd);
+		sge_len = sg_dma_len(sg_scmd);
+	}
+
+	for (;;) {
+		offset = (u32)(sge_addr & page_mask);
+
+		/* Insert a PRP list pointer entry at the page boundary */
+		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
+		if (unlikely(!page_mask_result)) {
+			scmd_printk(KERN_NOTICE,
+				    scmd, "page boundary ptr_sgl: 0x%p\n",
+				    ptr_sgl);
+			ptr_sgl_phys += 8;
+			*ptr_sgl = cpu_to_le64(ptr_sgl_phys);
+			ptr_sgl++;
+			num_prp_in_chain++;
+		}
+
+		*ptr_sgl = cpu_to_le64(sge_addr);
+		ptr_sgl++;
+		ptr_sgl_phys += 8;
+		num_prp_in_chain++;
+
+		sge_addr += mr_nvme_pg_size;
+		sge_len -= mr_nvme_pg_size;
+		data_len -= mr_nvme_pg_size;
+
+		if (data_len <= 0)
+			break;
+
+		if (sge_len > 0)
+			continue;
+
+		sg_scmd = sg_next(sg_scmd);
+		sge_addr = sg_dma_address(sg_scmd);
+		sge_len = sg_dma_len(sg_scmd);
+	}
+
+	main_chain_element->Length =
+			cpu_to_le32(num_prp_in_chain * sizeof(u64));
+
+	atomic_inc(&instance->prp_sgl);
+	return build_prp;
+}
+
+/**
+ * megasas_make_sgl_fusion -	Prepares IEEE SGLs (64-bit SGEs)
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @sgl_ptr:		SGL to be filled in
+ * @cmd:		cmd we are working on
+ * @sge_count:		sge count
+ *
+ */
+static void
+megasas_make_sgl_fusion(struct megasas_instance *instance,
+			struct scsi_cmnd *scp,
+			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+			struct megasas_cmd_fusion *cmd, int sge_count)
+{
+	int i, sg_processed;
+	struct scatterlist *os_sgl;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	if (instance->adapter_type >= INVADER_SERIES) {
+		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
+		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+		sgl_ptr_end->Flags = 0;
+	}
+
+	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
+		sgl_ptr->Flags = 0;
+		if (instance->adapter_type >= INVADER_SERIES)
+			if (i == sge_count - 1)
+				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+		sgl_ptr++;
+		sg_processed = i + 1;
+
+		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
+		    (sge_count > fusion->max_sge_in_main_msg)) {
+
+			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
+			if (instance->adapter_type >= INVADER_SERIES) {
+				if ((le16_to_cpu(cmd->io_request->IoFlags) &
+					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+					cmd->io_request->ChainOffset =
+						fusion->
+						chain_offset_io_request;
+				else
+					cmd->io_request->ChainOffset = 0;
+			} else
+				cmd->io_request->ChainOffset =
+					fusion->chain_offset_io_request;
+
+			sg_chain = sgl_ptr;
+			/* Prepare chain element */
+			sg_chain->NextChainOffset = 0;
+			if (instance->adapter_type >= INVADER_SERIES)
+				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+			else
+				sg_chain->Flags =
+					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
+
+			sgl_ptr =
+			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
+		}
+	}
+	atomic_inc(&instance->ieee_sgl);
+}
+
+/**
+ * megasas_make_sgl -	Build Scatter-Gather Lists (SGLs)
+ * @scp:		SCSI command pointer
+ * @instance:		Soft instance of controller
+ * @cmd:		Fusion command pointer
+ *
+ * This function builds SGLs based on the device type.
+ * NVMe drives use a different, NVMe-native SGL format:
+ * PRPs (Physical Region Pages).
+ *
+ * Returns the number of sg elements actually used, zero if the
+ * sg list is empty, or -ENOMEM if the mapping failed.
+ */
+static
+int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		     struct megasas_cmd_fusion *cmd)
+{
+	int sge_count;
+	bool build_prp = false;
+	struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
+
+	sge_count = scsi_dma_map(scp);
+
+	if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
+		return sge_count;
+
+	sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
+	if ((le16_to_cpu(cmd->io_request->IoFlags) &
+	    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
+	    (cmd->pd_interface == NVME_PD))
+		build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
+						  cmd, sge_count);
+
+	if (!build_prp)
+		megasas_make_sgl_fusion(instance, scp, sgl_chain64,
+					cmd, sge_count);
+
+	return sge_count;
+}
+
+/**
+ * megasas_set_pd_lba -	Sets PD LBA
+ * @io_request:		IO request frame
+ * @cdb_len:		CDB length
+ * @io_info:		IO information
+ * @scp:		SCSI command
+ * @local_map_ptr:	Raid map
+ * @ref_tag:		Logical block reference tag
+ *
+ * Used to set the PD LBA in the CDB for FP IOs
+ */
+void
+megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
+		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
+		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+{
+	struct MR_LD_RAID *raid;
+	u16 ld;
+	u64 start_blk = io_info->pdBlock;
+	u8 *cdb = io_request->CDB.CDB32;
+	u32 num_blocks = io_info->numBlocks;
+	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+
+	/* Check if T10 PI (DIF) is enabled for this LD */
+	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+	raid = MR_LdRaidGet(ld, local_map_ptr);
+	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
+		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
+
+		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
+		else
+			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
+		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
+
+		/* LBA */
+		cdb[12] = (u8)((start_blk >> 56) & 0xff);
+		cdb[13] = (u8)((start_blk >> 48) & 0xff);
+		cdb[14] = (u8)((start_blk >> 40) & 0xff);
+		cdb[15] = (u8)((start_blk >> 32) & 0xff);
+		cdb[16] = (u8)((start_blk >> 24) & 0xff);
+		cdb[17] = (u8)((start_blk >> 16) & 0xff);
+		cdb[18] = (u8)((start_blk >> 8) & 0xff);
+		cdb[19] = (u8)(start_blk & 0xff);
+
+		/* Logical block reference tag */
+		io_request->CDB.EEDP32.PrimaryReferenceTag =
+			cpu_to_be32(ref_tag);
+		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
+		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
+
+		/* Transfer length */
+		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
+		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
+		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
+		cdb[31] = (u8)(num_blocks & 0xff);
+
+		/* set SCSI IO EEDPFlags */
+		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+			io_request->EEDPFlags = cpu_to_le16(
+				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+				MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+		} else {
+			io_request->EEDPFlags = cpu_to_le16(
+				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
+		}
+		io_request->Control |= cpu_to_le32((0x4 << 26));
+		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
+	} else {
+		/* Some drives don't support 16/12 byte CDBs, convert to 10 */
+		if (((cdb_len == 12) || (cdb_len == 16)) &&
+		    (start_blk <= 0xffffffff)) {
+			if (cdb_len == 16) {
+				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+				flagvals = cdb[1];
+				groupnum = cdb[14];
+				control = cdb[15];
+			} else {
+				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+				flagvals = cdb[1];
+				groupnum = cdb[10];
+				control = cdb[11];
+			}
+
+			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+			cdb[0] = opcode;
+			cdb[1] = flagvals;
+			cdb[6] = groupnum;
+			cdb[9] = control;
+
+			/* Transfer length */
+			cdb[8] = (u8)(num_blocks & 0xff);
+			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+
+			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
+			cdb_len = 10;
+		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+			/* Convert to 16 byte CDB for large LBA's */
+			switch (cdb_len) {
+			case 6:
+				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+				control = cdb[5];
+				break;
+			case 10:
+				opcode =
+					cdb[0] == READ_10 ? READ_16 : WRITE_16;
+				flagvals = cdb[1];
+				groupnum = cdb[6];
+				control = cdb[9];
+				break;
+			case 12:
+				opcode =
+					cdb[0] == READ_12 ? READ_16 : WRITE_16;
+				flagvals = cdb[1];
+				groupnum = cdb[10];
+				control = cdb[11];
+				break;
+			}
+
+			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+			cdb[0] = opcode;
+			cdb[1] = flagvals;
+			cdb[14] = groupnum;
+			cdb[15] = control;
+
+			/* Transfer length */
+			cdb[13] = (u8)(num_blocks & 0xff);
+			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
+			cdb_len = 16;
+		}
+
+		/* Normal case, just load LBA here */
+		switch (cdb_len) {
+		case 6:
+		{
+			u8 val = cdb[1] & 0xE0;
+			cdb[3] = (u8)(start_blk & 0xff);
+			cdb[2] = (u8)((start_blk >> 8) & 0xff);
+			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
+			break;
+		}
+		case 10:
+			cdb[5] = (u8)(start_blk & 0xff);
+			cdb[4] = (u8)((start_blk >> 8) & 0xff);
+			cdb[3] = (u8)((start_blk >> 16) & 0xff);
+			cdb[2] = (u8)((start_blk >> 24) & 0xff);
+			break;
+		case 12:
+			cdb[5]    = (u8)(start_blk & 0xff);
+			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
+			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
+			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
+			break;
+		case 16:
+			cdb[9]    = (u8)(start_blk & 0xff);
+			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
+			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
+			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
+			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
+			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
+			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
+			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
+			break;
+		}
+	}
+}
+
+/**
+ * megasas_stream_detect -	stream detection on read and write IOs
+ * @instance:		Adapter soft state
+ * @cmd:		Command to be prepared
+ * @io_info:		IO Request info
+ *
+ */
+static void megasas_stream_detect(struct megasas_instance *instance,
+				  struct megasas_cmd_fusion *cmd,
+				  struct IO_REQUEST_INFO *io_info)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	u32 device_id = io_info->ldTgtId;
+	struct LD_STREAM_DETECT *current_ld_sd
+		= fusion->stream_detect_by_ld[device_id];
+	u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
+	u32 shifted_values, unshifted_values;
+	u32 index_value_mask, shifted_values_mask;
+	int i;
+	bool is_read_ahead = false;
+	struct STREAM_DETECT *current_sd;
+	/* find possible stream */
+	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
+		stream_num = (*track_stream >>
+			(i * BITS_PER_INDEX_STREAM)) &
+			STREAM_MASK;
+		current_sd = &current_ld_sd->stream_track[stream_num];
+		/*
+		 * If we found a stream, update the raid context and
+		 * also update the mruBitMap.
+		 */
+		/* boundary condition */
+		if ((current_sd->next_seq_lba) &&
+		    (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
+		    (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
+		    (current_sd->is_read == io_info->isRead)) {
+
+			if ((io_info->ldStartBlock != current_sd->next_seq_lba)	&&
+			    ((!io_info->isRead) || (!is_read_ahead)))
+				/*
+				 * Once the API is available we need to
+				 * change this.  At this point we are not
+				 * allowing any gap.
+				 */
+				continue;
+
+			SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
+			current_sd->next_seq_lba =
+			io_info->ldStartBlock + io_info->numBlocks;
+			/*
+			 *	update the mruBitMap LRU
+			 */
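+			/*
+			 * Illustrative example: the slots below index i
+			 * are shifted up by one slot and the matched
+			 * stream number is placed in slot 0, making it
+			 * the most recently used; e.g. for 4-bit slots,
+			 * a map of 0x3210 with a hit on slot 2 (stream 2)
+			 * becomes 0x3102.
+			 */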
+			shifted_values_mask =
+				(1 <<  i * BITS_PER_INDEX_STREAM) - 1;
+			shifted_values = ((*track_stream & shifted_values_mask)
+						<< BITS_PER_INDEX_STREAM);
+			index_value_mask =
+				STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+			unshifted_values =
+				*track_stream & ~(shifted_values_mask |
+				index_value_mask);
+			*track_stream =
+				unshifted_values | shifted_values | stream_num;
+			return;
+		}
+	}
+	/*
+	 * if we did not find any stream, create a new one
+	 * from the least recently used
+	 */
+	stream_num = (*track_stream >>
+		((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
+		STREAM_MASK;
+	current_sd = &current_ld_sd->stream_track[stream_num];
+	current_sd->is_read = io_info->isRead;
+	current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
+	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
+	return;
+}
+
+/**
+ * megasas_set_raidflag_cpu_affinity - This function sets the cpu
+ * affinity (cpu of the controller) and raid_flags in the raid context
+ * based on IO type.
+ *
+ * @praid_context:	IO RAID context
+ * @raid:		LD raid map
+ * @fp_possible:	Is fast path possible?
+ * @is_read:		Is read IO?
+ * @scsi_buff_len:	SCSI buffer length
+ *
+ */
+static void
+megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
+				  struct MR_LD_RAID *raid, bool fp_possible,
+				  u8 is_read, u32 scsi_buff_len)
+{
+	u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
+	struct RAID_CONTEXT_G35 *rctx_g35;
+
+	rctx_g35 = &praid_context->raid_context_g35;
+	if (fp_possible) {
+		if (is_read) {
+			if ((raid->cpuAffinity.pdRead.cpu0) &&
+			    (raid->cpuAffinity.pdRead.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.pdRead.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+		} else {
+			if ((raid->cpuAffinity.pdWrite.cpu0) &&
+			    (raid->cpuAffinity.pdWrite.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.pdWrite.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+			/* Fast path cache by pass capable R0/R1 VD */
+			if ((raid->level <= 1) &&
+			    (raid->capability.fp_cache_bypass_capable)) {
+				rctx_g35->routing_flags |=
+					(1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
+				rctx_g35->raid_flags =
+					(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+					<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+			}
+		}
+	} else {
+		if (is_read) {
+			if ((raid->cpuAffinity.ldRead.cpu0) &&
+			    (raid->cpuAffinity.ldRead.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.ldRead.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+		} else {
+			if ((raid->cpuAffinity.ldWrite.cpu0) &&
+			    (raid->cpuAffinity.ldWrite.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.ldWrite.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+
+			if (is_stream_detected(rctx_g35) &&
+			    ((raid->level == 5) || (raid->level == 6)) &&
+			    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
+			    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
+				cpu_sel = MR_RAID_CTX_CPUSEL_0;
+		}
+	}
+
+	rctx_g35->routing_flags |=
+		(cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+
+	/* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+	 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
+	 * The IO subtype is not a bitmap.
+	 */
+	if ((raid->level == 1) && (!is_read)) {
+		if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+			praid_context->raid_context_g35.raid_flags =
+				(MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+				<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+	}
+}
+
+/**
+ * megasas_build_ldio_fusion -	Prepares IOs to devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Prepares the io_request and chain elements (sg_frame) for IO
+ * The IO can be for PD (Fast Path) or LD
+ */
+void
+megasas_build_ldio_fusion(struct megasas_instance *instance,
+			  struct scsi_cmnd *scp,
+			  struct megasas_cmd_fusion *cmd)
+{
+	bool fp_possible;
+	u16 ld;
+	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+	u32 scsi_buff_len;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	struct IO_REQUEST_INFO io_info;
+	struct fusion_context *fusion;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+	u8 *raidLUN;
+	unsigned long spinlock_flags;
+	union RAID_CONTEXT_UNION *praid_context;
+	struct MR_LD_RAID *raid = NULL;
+	struct MR_PRIV_DEVICE *mrdev_priv;
+
+	device_id = MEGASAS_DEV_INDEX(scp);
+
+	fusion = instance->ctrl_context;
+
+	io_request = cmd->io_request;
+	io_request->RaidContext.raid_context.virtual_disk_tgt_id =
+		cpu_to_le16(device_id);
+	io_request->RaidContext.raid_context.status = 0;
+	io_request->RaidContext.raid_context.ex_status = 0;
+
+	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+
+	start_lba_lo = 0;
+	start_lba_hi = 0;
+	fp_possible = false;
+
+	/*
+	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
+	 */
+	if (scp->cmd_len == 6) {
+		datalength = (u32) scp->cmnd[4];
+		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+		start_lba_lo &= 0x1FFFFF;
+	}
+
+	/*
+	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
+	 */
+	else if (scp->cmd_len == 10) {
+		datalength = (u32) scp->cmnd[8] |
+			((u32) scp->cmnd[7] << 8);
+		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+	 */
+	else if (scp->cmd_len == 12) {
+		datalength = ((u32) scp->cmnd[6] << 24) |
+			((u32) scp->cmnd[7] << 16) |
+			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
+	 */
+	else if (scp->cmd_len == 16) {
+		datalength = ((u32) scp->cmnd[10] << 24) |
+			((u32) scp->cmnd[11] << 16) |
+			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+			((u32) scp->cmnd[7] << 16) |
+			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
+	io_info.numBlocks = datalength;
+	io_info.ldTgtId = device_id;
+	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+	scsi_buff_len = scsi_bufflen(scp);
+	io_request->DataLength = cpu_to_le32(scsi_buff_len);
+
+	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		io_info.isRead = 1;
+
+	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+	ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+
+	if (ld < instance->fw_supported_vd_count)
+		raid = MR_LdRaidGet(ld, local_map_ptr);
+
+	if (!raid || (!fusion->fast_path_io)) {
+		io_request->RaidContext.raid_context.reg_lock_flags  = 0;
+		fp_possible = false;
+	} else {
+		if (MR_BuildRaidContext(instance, &io_info,
+					&io_request->RaidContext.raid_context,
+					local_map_ptr, &raidLUN))
+			fp_possible = (io_info.fpOkForIo > 0) ? true : false;
+	}
+
+	cmd->request_desc->SCSIIO.MSIxIndex =
+		instance->reply_map[raw_smp_processor_id()];
+
+	praid_context = &io_request->RaidContext;
+
+	if (instance->adapter_type == VENTURA_SERIES) {
+		spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
+		megasas_stream_detect(instance, cmd, &io_info);
+		spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
+		/* On Ventura, if a stream is detected for a read and the
+		 * device is read-ahead capable, issue this IO as an LDIO.
+		 */
+		if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
+		    io_info.isRead && io_info.ra_capable)
+			fp_possible = false;
+
+		/* FP for Optimal raid level 1.
+		 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
+		 * are built by the driver as LD I/Os.
+		 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
+		 * (there is never a reason to process these as buffered writes)
+		 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
+		 * with the SLD bit asserted.
+		 */
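+		/*
+		 * Reading of the checks below (a sketch based on the code
+		 * rather than any external spec): a large write (>
+		 * MR_LARGE_IO_MIN_SIZE) is demoted to the LD path and arms
+		 * r1_ldio_hint, so the next r1_ldio_hint_default small
+		 * writes also take the LD path; the fw_outstanding bump
+		 * provisionally reserves room for the peer R1 command and
+		 * is undone whenever the FP path is abandoned.
+		 */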
+		if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+			mrdev_priv = scp->device->hostdata;
+
+			if (atomic_inc_return(&instance->fw_outstanding) >
+				(instance->host->can_queue)) {
+				fp_possible = false;
+				atomic_dec(&instance->fw_outstanding);
+			} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+				   (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
+				fp_possible = false;
+				atomic_dec(&instance->fw_outstanding);
+				if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+					atomic_set(&mrdev_priv->r1_ldio_hint,
+						   instance->r1_ldio_hint_default);
+			}
+		}
+
+		/* If raid is NULL, set CPU affinity to default CPU0 */
+		if (raid)
+			megasas_set_raidflag_cpu_affinity(praid_context,
+				raid, fp_possible, io_info.isRead,
+				scsi_buff_len);
+		else
+			praid_context->raid_context_g35.routing_flags |=
+				(MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+	}
+
+	if (fp_possible) {
+		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+				   local_map_ptr, start_lba_lo);
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
+			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		if (instance->adapter_type == INVADER_SERIES) {
+			if (io_request->RaidContext.raid_context.reg_lock_flags ==
+			    REGION_TYPE_UNUSED)
+				cmd->request_desc->SCSIIO.RequestFlags =
+					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+			io_request->RaidContext.raid_context.type
+				= MPI2_TYPE_CUDA;
+			io_request->RaidContext.raid_context.nseg = 0x1;
+			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+			io_request->RaidContext.raid_context.reg_lock_flags |=
+			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
+		} else if (instance->adapter_type == VENTURA_SERIES) {
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+						(1 << RAID_CONTEXT_NSEG_SHIFT);
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+						(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+			io_request->RaidContext.raid_context_g35.routing_flags |=
+						(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+			io_request->IoFlags |=
+				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+		}
+		if (fusion->load_balance_info &&
+			(fusion->load_balance_info[device_id].loadBalanceFlag) &&
+			(io_info.isRead)) {
+			io_info.devHandle =
+				get_updated_dev_handle(instance,
+					&fusion->load_balance_info[device_id],
+					&io_info, local_map_ptr);
+			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+			cmd->pd_r1_lb = io_info.pd_after_lb;
+			if (instance->adapter_type == VENTURA_SERIES)
+				io_request->RaidContext.raid_context_g35.span_arm
+					= io_info.span_arm;
+			else
+				io_request->RaidContext.raid_context.span_arm
+					= io_info.span_arm;
+
+		} else
+			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+
+		if (instance->adapter_type == VENTURA_SERIES)
+			cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
+		else
+			cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+
+		if ((raidLUN[0] == 1) &&
+			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
+			instance->dev_handle = !(instance->dev_handle);
+			io_info.devHandle =
+				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
+		}
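+		/*
+		 * Note (an assumption drawn from the code above): when the
+		 * raid LUN reports more than one valid device handle, the
+		 * driver flips instance->dev_handle on every such IO,
+		 * alternating between the two paths to the drive.
+		 */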
+
+		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+		io_request->DevHandle = io_info.devHandle;
+		cmd->pd_interface = io_info.pd_interface;
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raidLUN, 8);
+	} else {
+		io_request->RaidContext.raid_context.timeout_value =
+			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
+			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		if (instance->adapter_type == INVADER_SERIES) {
+			if (io_info.do_fp_rlbypass ||
+			(io_request->RaidContext.raid_context.reg_lock_flags
+					== REGION_TYPE_UNUSED))
+				cmd->request_desc->SCSIIO.RequestFlags =
+					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+			io_request->RaidContext.raid_context.type
+				= MPI2_TYPE_CUDA;
+			io_request->RaidContext.raid_context.reg_lock_flags |=
+				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
+			io_request->RaidContext.raid_context.nseg = 0x1;
+		} else if (instance->adapter_type == VENTURA_SERIES) {
+			io_request->RaidContext.raid_context_g35.routing_flags |=
+					(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+					(1 << RAID_CONTEXT_NSEG_SHIFT);
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+					(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+		}
+		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+		io_request->DevHandle = cpu_to_le16(device_id);
+
+	} /* Not FP */
+}
+
+/**
+ * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Prepares the io_request frame for non-rw io cmds for vd.
+ */
+static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
+			  struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
+{
+	u32 device_id;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+	u16 ld;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+	struct fusion_context *fusion = instance->ctrl_context;
+	u8                          span, physArm;
+	__le16                      devHandle;
+	u32                         arRef, pd;
+	struct MR_LD_RAID                  *raid;
+	struct RAID_CONTEXT                *pRAID_Context;
+	u8 fp_possible = 1;
+
+	io_request = cmd->io_request;
+	device_id = MEGASAS_DEV_INDEX(scmd);
+	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+	/* get RAID_Context pointer */
+	pRAID_Context = &io_request->RaidContext.raid_context;
+	/* Check with FW team */
+	pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+	pRAID_Context->reg_lock_row_lba    = 0;
+	pRAID_Context->reg_lock_length    = 0;
+
+	if (fusion->fast_path_io && (
+		device_id < instance->fw_supported_vd_count)) {
+
+		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+		if (ld >= instance->fw_supported_vd_count - 1)
+			fp_possible = 0;
+		else {
+			raid = MR_LdRaidGet(ld, local_map_ptr);
+			if (!(raid->capability.fpNonRWCapable))
+				fp_possible = 0;
+		}
+	} else
+		fp_possible = 0;
+
+	if (!fp_possible) {
+		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+		io_request->DevHandle = cpu_to_le16(device_id);
+		io_request->LUN[1] = scmd->device->lun;
+		pRAID_Context->timeout_value =
+			cpu_to_le16 (scmd->request->timeout / HZ);
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	} else {
+
+		/* set RAID context values */
+		pRAID_Context->config_seq_num = raid->seqNum;
+		if (instance->adapter_type != VENTURA_SERIES)
+			pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
+		pRAID_Context->timeout_value =
+			cpu_to_le16(raid->fpIoTimeoutForLd);
+
+		/* get the DevHandle for the PD (since this is
+		   fpNonRWCapable, this is a single disk RAID0) */
+		span = physArm = 0;
+		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+		/* build request descriptor */
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
+			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+		/* populate the LUN field */
+		memcpy(io_request->LUN, raid->LUN, 8);
+
+		/* build the raidScsiIO structure */
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		io_request->DevHandle = devHandle;
+	}
+}
+
+/**
+ * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command
+ * @cmd:		Command to be prepared
+ * @fp_possible:	true if the IO can use the fast path, false for the FW path
+ *
+ * Prepares the io_request frame for rw/non-rw io cmds for syspds
+ */
+static void
+megasas_build_syspd_fusion(struct megasas_instance *instance,
+	struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
+	bool fp_possible)
+{
+	u32 device_id;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+	u16 pd_index = 0;
+	u16 os_timeout_value;
+	u16 timeout_limit;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+	struct RAID_CONTEXT	*pRAID_Context;
+	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+	struct fusion_context *fusion = instance->ctrl_context;
+	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
+
+	device_id = MEGASAS_DEV_INDEX(scmd);
+	pd_index = MEGASAS_PD_INDEX(scmd);
+	os_timeout_value = scmd->request->timeout / HZ;
+	mr_device_priv_data = scmd->device->hostdata;
+	cmd->pd_interface = mr_device_priv_data->interface_type;
+
+	io_request = cmd->io_request;
+	/* get RAID_Context pointer */
+	pRAID_Context = &io_request->RaidContext.raid_context;
+	pRAID_Context->reg_lock_flags = 0;
+	pRAID_Context->reg_lock_row_lba = 0;
+	pRAID_Context->reg_lock_length = 0;
+	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+	io_request->LUN[1] = scmd->device->lun;
+	pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+
+	/* If FW supports PD sequence number */
+	if (instance->use_seqnum_jbod_fp &&
+		instance->pd_list[pd_index].driveType == TYPE_DISK) {
+		/* TgtId must be incremented by 255 since JBOD sequence
+		 * numbers are indexed below the raid map.
+		 */
+		/* More than 256 PD/JBOD support for Ventura */
+		if (instance->support_morethan256jbod)
+			pRAID_Context->virtual_disk_tgt_id =
+				pd_sync->seq[pd_index].pd_target_id;
+		else
+			pRAID_Context->virtual_disk_tgt_id =
+				cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
+		pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
+		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
+		if (instance->adapter_type == VENTURA_SERIES) {
+			io_request->RaidContext.raid_context_g35.routing_flags |=
+				(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+							(1 << RAID_CONTEXT_NSEG_SHIFT);
+			io_request->RaidContext.raid_context_g35.nseg_type |=
+							(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+		} else {
+			pRAID_Context->type = MPI2_TYPE_CUDA;
+			pRAID_Context->nseg = 0x1;
+			pRAID_Context->reg_lock_flags |=
+				(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+		}
+	} else if (fusion->fast_path_io) {
+		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+		pRAID_Context->config_seq_num = 0;
+		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+		io_request->DevHandle =
+			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+	} else {
+		/* Want to send all IO via FW path */
+		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+		pRAID_Context->config_seq_num = 0;
+		io_request->DevHandle = cpu_to_le16(0xFFFF);
+	}
+
+	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+
+	cmd->request_desc->SCSIIO.MSIxIndex =
+		instance->reply_map[raw_smp_processor_id()];
+
+	if (!fp_possible) {
+		/* system pd firmware path */
+		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
+		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+	} else {
+		if (os_timeout_value)
+			os_timeout_value++;
+
+		/* system pd Fast Path */
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		timeout_limit = (scmd->device->type == TYPE_DISK) ?
+				255 : 0xFFFF;
+		pRAID_Context->timeout_value =
+			cpu_to_le16((os_timeout_value > timeout_limit) ?
+			timeout_limit : os_timeout_value);
+		if (instance->adapter_type >= INVADER_SERIES)
+			io_request->IoFlags |=
+				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
+				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	}
+}
+
+/**
+ * megasas_build_io_fusion -	Prepares IOs to devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Invokes helper functions to prepare request frames
+ * and sets flags appropriate for IO/Non-IO cmd
+ */
+int
+megasas_build_io_fusion(struct megasas_instance *instance,
+			struct scsi_cmnd *scp,
+			struct megasas_cmd_fusion *cmd)
+{
+	int sge_count;
+	u8  cmd_type;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+	mr_device_priv_data = scp->device->hostdata;
+
+	/* Zero out some fields so they don't get reused */
+	memset(io_request->LUN, 0x0, 8);
+	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
+	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
+	io_request->EEDPFlags = 0;
+	io_request->Control = 0;
+	io_request->EEDPBlockSize = 0;
+	io_request->ChainOffset = 0;
+	io_request->RaidContext.raid_context.raid_flags = 0;
+	io_request->RaidContext.raid_context.type = 0;
+	io_request->RaidContext.raid_context.nseg = 0;
+
+	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
+	/*
+	 * Just the CDB length; the rest of the IoFlags are zero.
+	 * This will be modified for FP in build_ldio_fusion.
+	 */
+	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
+
+	switch (cmd_type = megasas_cmd_type(scp)) {
+	case READ_WRITE_LDIO:
+		megasas_build_ldio_fusion(instance, scp, cmd);
+		break;
+	case NON_READ_WRITE_LDIO:
+		megasas_build_ld_nonrw_fusion(instance, scp, cmd);
+		break;
+	case READ_WRITE_SYSPDIO:
+		megasas_build_syspd_fusion(instance, scp, cmd, true);
+		break;
+	case NON_READ_WRITE_SYSPDIO:
+		if (instance->secure_jbod_support ||
+		    mr_device_priv_data->is_tm_capable)
+			megasas_build_syspd_fusion(instance, scp, cmd, false);
+		else
+			megasas_build_syspd_fusion(instance, scp, cmd, true);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Construct SGL
+	 */
+
+	sge_count = megasas_make_sgl(instance, scp, cmd);
+
+	if (sge_count > instance->max_num_sge || (sge_count < 0)) {
+		dev_err(&instance->pdev->dev,
+			"%s %d sge_count (%d) is out of range. Range is:  0-%d\n",
+			__func__, __LINE__, sge_count, instance->max_num_sge);
+		return 1;
+	}
+
+	if (instance->adapter_type == VENTURA_SERIES) {
+		set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
+		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
+		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
+	} else {
+		/* num_sge stores the lower 8 bits of sge_count;
+		 * num_sge_ext stores the upper 8 bits.
+		 */
+		io_request->RaidContext.raid_context.num_sge = sge_count;
+		io_request->RaidContext.raid_context.num_sge_ext =
+			(u8)(sge_count >> 8);
+	}
+
+	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
+
+	io_request->SGLOffset0 =
+		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
+
+	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+
+	cmd->scmd = scp;
+	scp->SCp.ptr = (char *)cmd;
+
+	return 0;
+}
+
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
+{
+	u8 *p;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+	p = fusion->req_frames_desc +
+		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
+
+	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/* megasas_prepare_secondRaid1_IO
+ * Prepares the second IO of a RAID-1 fast path write pair.
+ */
+void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+			    struct megasas_cmd_fusion *cmd,
+			    struct megasas_cmd_fusion *r1_cmd)
+{
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
+	struct fusion_context *fusion;
+	fusion = instance->ctrl_context;
+	req_desc = cmd->request_desc;
+	/* copy the io request frame as well as the SGEs in the main
+	 * message for the r1 command
+	 */
+	memcpy(r1_cmd->io_request, cmd->io_request,
+	       (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
+	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+	       (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
+	/* the sense buffer is different for the r1 command */
+	r1_cmd->io_request->SenseBufferLowAddress =
+			cpu_to_le32(r1_cmd->sense_phys_addr);
+	r1_cmd->scmd = cmd->scmd;
+	req_desc2 = megasas_get_request_descriptor(instance,
+						   (r1_cmd->index - 1));
+	req_desc2->Words = 0;
+	r1_cmd->request_desc = req_desc2;
+	req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
+	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
+	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
+	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
+	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
+	cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+			cpu_to_le16(r1_cmd->index);
+	r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+			cpu_to_le16(cmd->index);
+	/* the MSIxIndex of both commands' request descriptors should be the same */
+	r1_cmd->request_desc->SCSIIO.MSIxIndex =
+			cmd->request_desc->SCSIIO.MSIxIndex;
+	/* the span arm is different for the r1 cmd */
+	r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
+			cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
+}
+
+/**
+ * megasas_build_and_issue_cmd_fusion -Main routine for building and
+ *                                     issuing non IOCTL cmd
+ * @instance:			Adapter soft state
+ * @scmd:			pointer to scsi cmd from OS
+ */
+static u32
+megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
+				   struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	u32 index;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
+		instance->ldio_threshold &&
+		(atomic_inc_return(&instance->ldio_outstanding) >
+		instance->ldio_threshold)) {
+		atomic_dec(&instance->ldio_outstanding);
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	}
+
+	if (atomic_inc_return(&instance->fw_outstanding) >
+			instance->host->can_queue) {
+		atomic_dec(&instance->fw_outstanding);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
+
+	if (!cmd) {
+		atomic_dec(&instance->fw_outstanding);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	index = cmd->index;
+
+	req_desc = megasas_get_request_descriptor(instance, index-1);
+
+	req_desc->Words = 0;
+	cmd->request_desc = req_desc;
+
+	if (megasas_build_io_fusion(instance, scmd, cmd)) {
+		megasas_return_cmd_fusion(instance, cmd);
+		dev_err(&instance->pdev->dev, "Error building command\n");
+		cmd->request_desc = NULL;
+		atomic_dec(&instance->fw_outstanding);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	req_desc = cmd->request_desc;
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+	if (cmd->io_request->ChainOffset != 0 &&
+	    cmd->io_request->ChainOffset != 0xF)
+		dev_err(&instance->pdev->dev, "The chain offset value is not "
+		       "correct : %x\n", cmd->io_request->ChainOffset);
+	/*
+	 * If the LD is RAID 1/10 FP write capable, try to get a second
+	 * command from the pool and construct it.  FW has confirmed
+	 * that the LBA values on the two PDs backing a single R1/10 LD
+	 * are always the same.
+	 */
+	/* The driver side count should always be less than max_fw_cmds
+	 * for a new command to be available.
+	 */
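+	/*
+	 * The peer command is taken from the upper half of the command
+	 * pool (tag + max_fw_cmds); this presumes the fusion command
+	 * list was sized with one extra command per host tag for R1 FP
+	 * writes.
+	 */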
+	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+		r1_cmd = megasas_get_cmd_fusion(instance,
+				(scmd->request->tag + instance->max_fw_cmds));
+		megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
+	}
+
+	/*
+	 * Issue the command to the FW
+	 */
+
+	megasas_fire_cmd_fusion(instance, req_desc);
+
+	if (r1_cmd)
+		megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
+
+	return 0;
+}
+
+/**
+ * megasas_complete_r1_command -
+ * completes an R1 FP write command which has a valid peer SMID
+ * @instance:			Adapter soft state
+ * @cmd:			MPT command frame
+ *
+ */
+static inline void
+megasas_complete_r1_command(struct megasas_instance *instance,
+			    struct megasas_cmd_fusion *cmd)
+{
+	u8 *sense, status, ex_status;
+	u32 data_length;
+	u16 peer_smid;
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *r1_cmd = NULL;
+	struct scsi_cmnd *scmd_local = NULL;
+	struct RAID_CONTEXT_G35 *rctx_g35;
+
+	rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
+	fusion = instance->ctrl_context;
+	peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+
+	r1_cmd = fusion->cmd_list[peer_smid - 1];
+	scmd_local = cmd->scmd;
+	status = rctx_g35->status;
+	ex_status = rctx_g35->ex_status;
+	data_length = cmd->io_request->DataLength;
+	sense = cmd->sense;
+
+	cmd->cmd_completed = true;
+
+	/* Check whether the peer command has completed */
+	if (r1_cmd->cmd_completed) {
+		rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
+		if (rctx_g35->status != MFI_STAT_OK) {
+			status = rctx_g35->status;
+			ex_status = rctx_g35->ex_status;
+			data_length = r1_cmd->io_request->DataLength;
+			sense = r1_cmd->sense;
+		}
+
+		megasas_return_cmd_fusion(instance, r1_cmd);
+		map_cmd_status(fusion, scmd_local, status, ex_status,
+			       le32_to_cpu(data_length), sense);
+		if (instance->ldio_threshold &&
+		    megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+			atomic_dec(&instance->ldio_outstanding);
+		scmd_local->SCp.ptr = NULL;
+		megasas_return_cmd_fusion(instance, cmd);
+		scsi_dma_unmap(scmd_local);
+		scmd_local->scsi_done(scmd_local);
+	}
+}
+
+/**
+ * complete_cmd_fusion -	Completes command
+ * @instance:			Adapter soft state
+ * @MSIxIndex:			MSI-X reply queue index
+ *
+ * Completes all commands that are in the reply descriptor queue
+ */
+int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+{
+	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
+	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+	struct fusion_context *fusion;
+	struct megasas_cmd *cmd_mfi;
+	struct megasas_cmd_fusion *cmd_fusion;
+	u16 smid, num_completed;
+	u8 reply_descript_type, *sense, status, extStatus;
+	u32 device_id, data_length;
+	union desc_value d_val;
+	struct LD_LOAD_BALANCE_INFO *lbinfo;
+	int threshold_reply_count = 0;
+	struct scsi_cmnd *scmd_local = NULL;
+	struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
+	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
+
+	fusion = instance->ctrl_context;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return IRQ_HANDLED;
+
+	desc = fusion->reply_frames_desc[MSIxIndex] +
+				fusion->last_reply_idx[MSIxIndex];
+
+	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+	d_val.word = desc->Words;
+
+	reply_descript_type = reply_desc->ReplyFlags &
+		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+		return IRQ_NONE;
+
+	num_completed = 0;
+
+	while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
+	       d_val.u.high != cpu_to_le32(UINT_MAX)) {
+
+		smid = le16_to_cpu(reply_desc->SMID);
+		cmd_fusion = fusion->cmd_list[smid - 1];
+		scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+						cmd_fusion->io_request;
+
+		scmd_local = cmd_fusion->scmd;
+		status = scsi_io_req->RaidContext.raid_context.status;
+		extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
+		sense = cmd_fusion->sense;
+		data_length = scsi_io_req->DataLength;
+
+		switch (scsi_io_req->Function) {
+		case MPI2_FUNCTION_SCSI_TASK_MGMT:
+			mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
+						cmd_fusion->io_request;
+			mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
+						&mr_tm_req->TmRequest;
+			dev_dbg(&instance->pdev->dev, "TM completion:"
+				"type: 0x%x TaskMID: 0x%x\n",
+				mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+			complete(&cmd_fusion->done);
+			break;
+		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
+			/* Update load balancing info */
+			if (fusion->load_balance_info &&
+			    (cmd_fusion->scmd->SCp.Status &
+			    MEGASAS_LOAD_BALANCE_FLAG)) {
+				device_id = MEGASAS_DEV_INDEX(scmd_local);
+				lbinfo = &fusion->load_balance_info[device_id];
+				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
+				cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+			}
+			/* Fall through and complete IO */
+		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
+			atomic_dec(&instance->fw_outstanding);
+			if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
+				map_cmd_status(fusion, scmd_local, status,
+					       extStatus, le32_to_cpu(data_length),
+					       sense);
+				if (instance->ldio_threshold &&
+				    (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
+					atomic_dec(&instance->ldio_outstanding);
+				scmd_local->SCp.ptr = NULL;
+				megasas_return_cmd_fusion(instance, cmd_fusion);
+				scsi_dma_unmap(scmd_local);
+				scmd_local->scsi_done(scmd_local);
+			} else	/* Optimal VD - R1 FP command completion. */
+				megasas_complete_r1_command(instance, cmd_fusion);
+			break;
+		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+			/* Poll mode: dummy free.  In interrupt mode the
+			 * caller performs the reverse check.
+			 */
+			if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
+				cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
+				megasas_return_cmd(instance, cmd_mfi);
+			} else
+				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+			break;
+		}
+
+		fusion->last_reply_idx[MSIxIndex]++;
+		if (fusion->last_reply_idx[MSIxIndex] >=
+		    fusion->reply_q_depth)
+			fusion->last_reply_idx[MSIxIndex] = 0;
+
+		desc->Words = cpu_to_le64(ULLONG_MAX);
+		num_completed++;
+		threshold_reply_count++;
+
+		/* Get the next reply descriptor */
+		if (!fusion->last_reply_idx[MSIxIndex])
+			desc = fusion->reply_frames_desc[MSIxIndex];
+		else
+			desc++;
+
+		reply_desc =
+		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+		d_val.word = desc->Words;
+
+		reply_descript_type = reply_desc->ReplyFlags &
+			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+			break;
+		/*
+		 * Write to the reply post host index register after a
+		 * threshold number of replies has been completed while
+		 * more replies are still pending in the reply queue.
+		 */
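+		/*
+		 * Register layout implied by the writes below: bits 31:24
+		 * carry the MSI-x index (masked to 3 bits when combined
+		 * reply queues are in use) and bits 23:0 carry the last
+		 * processed reply index.
+		 */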
+		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+			if (instance->msix_combined)
+				writel(((MSIxIndex & 0x7) << 24) |
+					fusion->last_reply_idx[MSIxIndex],
+					instance->reply_post_host_index_addr[MSIxIndex/8]);
+			else
+				writel((MSIxIndex << 24) |
+					fusion->last_reply_idx[MSIxIndex],
+					instance->reply_post_host_index_addr[0]);
+			threshold_reply_count = 0;
+		}
+	}
+
+	if (!num_completed)
+		return IRQ_NONE;
+
+	wmb();
+	if (instance->msix_combined)
+		writel(((MSIxIndex & 0x7) << 24) |
+			fusion->last_reply_idx[MSIxIndex],
+			instance->reply_post_host_index_addr[MSIxIndex/8]);
+	else
+		writel((MSIxIndex << 24) |
+			fusion->last_reply_idx[MSIxIndex],
+			instance->reply_post_host_index_addr[0]);
+	megasas_check_and_restore_queue_depth(instance);
+	return IRQ_HANDLED;
+}
+
+/**
+ * megasas_sync_irqs -	Synchronizes all IRQs owned by adapter
+ * @instance_addr:		Adapter soft state address
+ */
+void megasas_sync_irqs(unsigned long instance_addr)
+{
+	u32 count, i;
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	for (i = 0; i < count; i++)
+		synchronize_irq(pci_irq_vector(instance->pdev, i));
+}
+
+/**
+ * megasas_complete_cmd_dpc_fusion -	Completes command
+ * @instance:			Adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+void
+megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+	unsigned long flags;
+	u32 count, MSIxIndex;
+
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	/* If we have already declared the adapter dead, do not complete cmds */
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+		complete_cmd_fusion(instance, MSIxIndex);
+}
+
+/**
+ * megasas_isr_fusion - isr entry point
+ */
+irqreturn_t megasas_isr_fusion(int irq, void *devp)
+{
+	struct megasas_irq_context *irq_context = devp;
+	struct megasas_instance *instance = irq_context->instance;
+	u32 mfiStatus, fw_state, dma_state;
+
+	if (instance->mask_interrupts)
+		return IRQ_NONE;
+
+	if (!instance->msix_vectors) {
+		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+		if (!mfiStatus)
+			return IRQ_NONE;
+	}
+
+	/* If we are resetting, bail */
+	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
+		instance->instancet->clear_intr(instance->reg_set);
+		return IRQ_HANDLED;
+	}
+
+	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
+		instance->instancet->clear_intr(instance->reg_set);
+		/* If we didn't complete any commands, check for FW fault */
+		fw_state = instance->instancet->read_fw_status_reg(
+			instance->reg_set) & MFI_STATE_MASK;
+		dma_state = instance->instancet->read_fw_status_reg
+			(instance->reg_set) & MFI_STATE_DMADONE;
+		if (instance->crash_dump_drv_support &&
+			instance->crash_dump_app_support) {
+			/* Start collecting crash, if DMA bit is done */
+			if ((fw_state == MFI_STATE_FAULT) && dma_state)
+				schedule_work(&instance->crash_init);
+			else if (fw_state == MFI_STATE_FAULT) {
+				if (instance->unload == 0)
+					schedule_work(&instance->work_init);
+			}
+		} else if (fw_state == MFI_STATE_FAULT) {
+			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
+			       "for scsi%d\n", instance->host->host_no);
+			if (instance->unload == 0)
+				schedule_work(&instance->work_init);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * build_mpt_mfi_pass_thru - builds a cmd for MFI pass-thru
+ * @instance:			Adapter soft state
+ * @mfi_cmd:			megasas_cmd pointer
+ *
+ */
+void
+build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+			struct megasas_cmd *mfi_cmd)
+{
+	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
+	struct megasas_cmd_fusion *cmd;
+	struct fusion_context *fusion;
+	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+
+	fusion = instance->ctrl_context;
+
+	cmd = megasas_get_cmd_fusion(instance,
+			instance->max_scsi_cmds + mfi_cmd->index);
+
+	/* Save the smid. To be used for returning the cmd */
+	mfi_cmd->context.smid = cmd->index;
+
+	/*
+	 * For cmds where the flag is set, store the flag and check
+	 * on completion. For cmds with this flag, don't call
+	 * megasas_complete_cmd
+	 */
+
+	if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
+		mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
+
+	io_req = cmd->io_request;
+
+	if (instance->adapter_type >= INVADER_SERIES) {
+		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
+			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
+		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+		sgl_ptr_end->Flags = 0;
+	}
+
+	mpi25_ieee_chain =
+	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
+				       SGL) / 4;
+	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
+
+	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
+
+	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+	mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
+}
+
+/**
+ * build_mpt_cmd - Calls a helper function to build an MFI pass-thru cmd
+ * @instance:			Adapter soft state
+ * @cmd:			mfi cmd to build
+ *
+ */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
+	u16 index;
+
+	build_mpt_mfi_pass_thru(instance, cmd);
+	index = cmd->context.smid;
+
+	req_desc = megasas_get_request_descriptor(instance, index - 1);
+
+	req_desc->Words = 0;
+	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+	req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+	return req_desc;
+}
+
+/**
+ * megasas_issue_dcmd_fusion - Issues an MFI pass-thru cmd
+ * @instance:			Adapter soft state
+ * @cmd:			mfi cmd pointer
+ *
+ */
+void
+megasas_issue_dcmd_fusion(struct megasas_instance *instance,
+			  struct megasas_cmd *cmd)
+{
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+	req_desc = build_mpt_cmd(instance, cmd);
+
+	megasas_fire_cmd_fusion(instance, req_desc);
+	return;
+}
+
+/**
+ * megasas_release_fusion -	Reverses the FW initialization
+ * @instance:			Adapter soft state
+ */
+void
+megasas_release_fusion(struct megasas_instance *instance)
+{
+	megasas_free_cmds(instance);
+	megasas_free_cmds_fusion(instance);
+
+	iounmap(instance->reg_set);
+
+	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
+}
+
+/**
+ * megasas_read_fw_status_reg_fusion - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_alloc_host_crash_buffer -	Allocate host buffers for FW crash dump collection
+ * @instance:				Controller's soft instance
+ *
+ * The number of buffers actually allocated is recorded in
+ * instance->drv_buf_alloc.
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+		instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
+		if (!instance->crash_buf[i]) {
+			dev_info(&instance->pdev->dev, "Firmware crash dump "
+				"memory allocation failed at index %d\n", i);
+			break;
+		}
+	}
+	instance->drv_buf_alloc = i;
+}
+
+/**
+ * megasas_free_host_crash_buffer -	Free the host buffers used for FW crash dump collection
+ * @instance:				Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+
+	for (i = 0; i < instance->drv_buf_alloc; i++) {
+		if (instance->crash_buf[i])
+			vfree(instance->crash_buf[i]);
+	}
+	instance->drv_buf_index = 0;
+	instance->drv_buf_alloc = 0;
+	instance->fw_crash_state = UNAVAILABLE;
+	instance->fw_crash_buffer_size = 0;
+}
+
+/**
+ * megasas_adp_reset_fusion -	For controller reset
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_fusion(struct megasas_instance *instance,
+			 struct megasas_register_set __iomem *regs)
+{
+	u32 host_diag, abs_state, retry;
+
+	/* Now try to reset the chip */
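+	/* The six-key write sequence below unlocks the host diag register */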
+	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
+
+	/* Check that the diag write enable (DRWE) bit is on */
+	host_diag = readl(&instance->reg_set->fusion_host_diag);
+	retry = 0;
+	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+		msleep(100);
+		host_diag = readl(&instance->reg_set->fusion_host_diag);
+		if (retry++ == 100) {
+			dev_warn(&instance->pdev->dev,
+				"Host diag unlock failed from %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+	}
+	if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+		return -1;
+
+	/* Send chip reset command */
+	writel(host_diag | HOST_DIAG_RESET_ADAPTER,
+		&instance->reg_set->fusion_host_diag);
+	msleep(3000);
+
+	/* Make sure reset adapter bit is cleared */
+	host_diag = readl(&instance->reg_set->fusion_host_diag);
+	retry = 0;
+	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+		msleep(100);
+		host_diag = readl(&instance->reg_set->fusion_host_diag);
+		if (retry++ == 1000) {
+			dev_warn(&instance->pdev->dev,
+				"Diag reset adapter never cleared %s %d\n",
+				__func__, __LINE__);
+			break;
+		}
+	}
+	if (host_diag & HOST_DIAG_RESET_ADAPTER)
+		return -1;
+
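+	/* Wait for firmware to move past its early initialization states */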
+	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
+			& MFI_STATE_MASK;
+	retry = 0;
+
+	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
+		msleep(100);
+		abs_state = instance->instancet->
+			read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+	}
+	if (abs_state <= MFI_STATE_FW_INIT) {
+		dev_warn(&instance->pdev->dev,
+			"fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
+			abs_state, __func__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * megasas_check_reset_fusion -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_fusion(struct megasas_instance *instance,
+			   struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+/* This function waits for outstanding commands on fusion to complete */
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+					int reason, int *convert)
+{
+	int i, outstanding, retval = 0, hb_seconds_missed = 0;
+	u32 fw_state;
+
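+	/* Poll once per second for up to resetwaittime seconds */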
+	for (i = 0; i < resetwaittime; i++) {
+		/* Check if firmware is in fault state */
+		fw_state = instance->instancet->read_fw_status_reg(
+			instance->reg_set) & MFI_STATE_MASK;
+		if (fw_state == MFI_STATE_FAULT) {
+			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
+			       " will reset adapter scsi%d.\n",
+				instance->host->host_no);
+			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+			if (instance->requestorId && reason) {
+				dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
+				" state while polling during"
+				" I/O timeout handling for %d\n",
+				instance->host->host_no);
+				*convert = 1;
+			}
+
+			retval = 1;
+			goto out;
+		}
+
+		if (reason == MFI_IO_TIMEOUT_OCR) {
+			dev_info(&instance->pdev->dev,
+				"MFI IO is timed out, initiating OCR\n");
+			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+			retval = 1;
+			goto out;
+		}
+
+		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
+		if (instance->requestorId && !reason) {
+			retval = 1;
+			goto out;
+		}
+
+		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+		if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
+			if (instance->hb_host_mem->HB.fwCounter !=
+			    instance->hb_host_mem->HB.driverCounter) {
+				instance->hb_host_mem->HB.driverCounter =
+					instance->hb_host_mem->HB.fwCounter;
+				hb_seconds_missed = 0;
+			} else {
+				hb_seconds_missed++;
+				if (hb_seconds_missed ==
+				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+					dev_warn(&instance->pdev->dev, "SR-IOV:"
+					       " Heartbeat never completed "
+					       " while polling during I/O "
+					       " timeout handling for "
+					       "scsi%d.\n",
+					       instance->host->host_no);
+					       *convert = 1;
+					       retval = 1;
+					       goto out;
+				}
+			}
+		}
+
+		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+		outstanding = atomic_read(&instance->fw_outstanding);
+		if (!outstanding)
+			goto out;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
+			       "commands to complete for scsi%d\n", i,
+			       outstanding, instance->host->host_no);
+		}
+		msleep(1000);
+	}
+
+	if (atomic_read(&instance->fw_outstanding)) {
+		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
+		       "will reset adapter scsi%d.\n",
+		       instance->host->host_no);
+		*convert = 1;
+		retval = 1;
+	}
+out:
+	return retval;
+}
+
+void megasas_reset_reply_desc(struct megasas_instance *instance)
+{
+	int i, j, count;
+	struct fusion_context *fusion;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+	fusion = instance->ctrl_context;
+	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+	for (i = 0 ; i < count ; i++) {
+		fusion->last_reply_idx[i] = 0;
+		reply_desc = fusion->reply_frames_desc[i];
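+		/* All-ones in Words marks a reply descriptor slot as unused */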
+		for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
+			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+	}
+}
+
+/*
+ * megasas_refire_mgmt_cmd :	Re-fire management commands
+ * @instance:				Controller's soft instance
+ */
+void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+{
+	int j;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion;
+	struct megasas_cmd *cmd_mfi;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	u16 smid;
+	bool refire_cmd = false;
+
+	fusion = instance->ctrl_context;
+
+	/* Re-fire management commands.
+	 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
+	 */
+	for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
+		cmd_fusion = fusion->cmd_list[j];
+		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+		smid = le16_to_cpu(cmd_mfi->context.smid);
+
+		if (!smid)
+			continue;
+
+		/* Do not refire shutdown command */
+		if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
+			MR_DCMD_CTRL_SHUTDOWN) {
+			cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+			continue;
+		}
+
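+		/* Map-update DCMDs and commands marked DRV_DCMD_SKIP_REFIRE
+		 * are returned to the pool instead of being re-fired.
+		 */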
+		req_desc = megasas_get_request_descriptor
+					(instance, smid - 1);
+		refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
+				cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
+				 (cmd_mfi->frame->dcmd.opcode !=
+				cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
+				&& !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+		if (refire_cmd)
+			megasas_fire_cmd_fusion(instance, req_desc);
+		else
+			megasas_return_cmd(instance, cmd_mfi);
+	}
+}
+
+/*
+ * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
+ * @instance: per adapter struct
+ * @id: the id assigned by the OS
+ * @channel: the channel assigned by the OS
+ *
+ * Returns SUCCESS if no IOs are pending to the SCSI device, else returns FAILED
+ */
+
+static int megasas_track_scsiio(struct megasas_instance *instance,
+		int id, int channel)
+{
+	int i, found = 0;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+		cmd_fusion = fusion->cmd_list[i];
+		if (cmd_fusion->scmd &&
+			(cmd_fusion->scmd->device->id == id &&
+			cmd_fusion->scmd->device->channel == channel)) {
+			dev_info(&instance->pdev->dev,
+				"SCSI commands pending to target "
+				"channel %d id %d \tSMID: 0x%x\n",
+				channel, id, cmd_fusion->index);
+			scsi_print_command(cmd_fusion->scmd);
+			found = 1;
+			break;
+		}
+	}
+
+	return found ? FAILED : SUCCESS;
+}
+
+/**
+ * megasas_tm_response_code - translation of device response code
+ * @instance: per adapter object
+ * @mpi_reply: MPI reply returned by firmware
+ *
+ * Return nothing.
+ */
+static void
+megasas_tm_response_code(struct megasas_instance *instance,
+		struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+	char *desc;
+
+	switch (mpi_reply->ResponseCode) {
+	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+		desc = "task management request completed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+		desc = "invalid frame";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+		desc = "task management request not supported";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+		desc = "task management request failed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+		desc = "task management request succeeded";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+		desc = "invalid lun";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG:
+		desc = "overlapped tag attempted";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+		desc = "task queued, however not sent to target";
+		break;
+	default:
+		desc = "unknown";
+		break;
+	}
+	dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
+		mpi_reply->ResponseCode, desc);
+	dev_dbg(&instance->pdev->dev,
+		"TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
+		" 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+		mpi_reply->TerminationCount, mpi_reply->DevHandle,
+		mpi_reply->Function, mpi_reply->TaskType,
+		mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
+
+/**
+ * megasas_issue_tm - main routine for sending tm requests
+ * @instance: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @smid_task: smid assigned to the task
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.h)
+ * Context: user
+ *
+ * MegaRAID uses the MPT interface for Task Management requests.
+ * A generic API for sending task management requests to firmware.
+ *
+ * Return SUCCESS or FAILED.
+ */
+static int
+megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+	uint channel, uint id, u16 smid_task, u8 type)
+{
+	struct MR_TASK_MANAGE_REQUEST *mr_request;
+	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
+	unsigned long timeleft;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct megasas_cmd *cmd_mfi;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	struct fusion_context *fusion = NULL;
+	struct megasas_cmd_fusion *scsi_lookup;
+	int rc;
+	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+
+	fusion = instance->ctrl_context;
+
+	cmd_mfi = megasas_get_cmd(instance);
+
+	if (!cmd_mfi) {
+		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	cmd_fusion = megasas_get_cmd_fusion(instance,
+			instance->max_scsi_cmds + cmd_mfi->index);
+
+	/* Save the SMID; used later for returning the cmd */
+	cmd_mfi->context.smid = cmd_fusion->index;
+
+	req_desc = megasas_get_request_descriptor(instance,
+			(cmd_fusion->index - 1));
+
+	cmd_fusion->request_desc = req_desc;
+	req_desc->Words = 0;
+
+	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
+	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
+	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+	mpi_request->DevHandle = cpu_to_le16(device_handle);
+	mpi_request->TaskType = type;
+	mpi_request->TaskMID = cpu_to_le16(smid_task);
+	mpi_request->LUN[1] = 0;
+
+
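+	/* TM requests go out as high-priority descriptors so firmware
+	 * handles them ahead of normal I/O.
+	 */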
+	req_desc = cmd_fusion->request_desc;
+	req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
+	req_desc->HighPriority.RequestFlags =
+		(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	req_desc->HighPriority.MSIxIndex =  0;
+	req_desc->HighPriority.LMID = 0;
+	req_desc->HighPriority.Reserved1 = 0;
+
+	if (channel < MEGASAS_MAX_PD_CHANNELS)
+		mr_request->tmReqFlags.isTMForPD = 1;
+	else
+		mr_request->tmReqFlags.isTMForLD = 1;
+
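+	/* Fire the TM request and wait up to 50 seconds for completion */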
+	init_completion(&cmd_fusion->done);
+	megasas_fire_cmd_fusion(instance, req_desc);
+
+	timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+
+	if (!timeleft) {
+		dev_err(&instance->pdev->dev,
+			"task mgmt type 0x%x timed out\n", type);
+		cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+		mutex_unlock(&instance->reset_mutex);
+		rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+		mutex_lock(&instance->reset_mutex);
+		return rc;
+	}
+
+	mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
+	megasas_tm_response_code(instance, mpi_reply);
+
+	megasas_return_cmd(instance, cmd_mfi);
+	rc = SUCCESS;
+	switch (type) {
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		scsi_lookup = fusion->cmd_list[smid_task - 1];
+
+		if (scsi_lookup->scmd == NULL)
+			break;
+		else {
+			instance->instancet->disable_intr(instance);
+			megasas_sync_irqs((unsigned long)instance);
+			instance->instancet->enable_intr(instance);
+			if (scsi_lookup->scmd == NULL)
+				break;
+		}
+		rc = FAILED;
+		break;
+
+	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
+			break;
+		instance->instancet->disable_intr(instance);
+		megasas_sync_irqs((unsigned long)instance);
+		rc = megasas_track_scsiio(instance, id, channel);
+		instance->instancet->enable_intr(instance);
+
+		break;
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+		break;
+	default:
+		rc = FAILED;
+		break;
+	}
+
+	return rc;
+
+}
+
+/*
+ * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI cmd
+ * @scmd: SCSI command whose SMID is sought
+ *
+ * Return Non Zero index, if SMID found in outstanding commands
+ */
+static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
+{
+	int i, ret = 0;
+	struct megasas_instance *instance;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	fusion = instance->ctrl_context;
+
+	for (i = 0; i < instance->max_scsi_cmds; i++) {
+		cmd_fusion = fusion->cmd_list[i];
+		if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
+			scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
+				" SMID: %d\n", cmd_fusion->index);
+			ret = cmd_fusion->index;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * megasas_get_tm_devhandle - Get devhandle for TM request
+ * @sdev:		     OS provided scsi device
+ *
+ * Returns:		     devhandle/targetID of SCSI device
+ */
+static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
+{
+	u16 pd_index = 0;
+	u32 device_id;
+	struct megasas_instance *instance;
+	struct fusion_context *fusion;
+	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+	u16 devhandle = (u16)ULONG_MAX;
+
+	instance = (struct megasas_instance *)sdev->host->hostdata;
+	fusion = instance->ctrl_context;
+
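+	/* For JBOD (physical) devices the firmware devhandle comes from the
+	 * PD seq-num sync map; for logical drives the target ID is used.
+	 */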
+	if (!MEGASAS_IS_LOGICAL(sdev)) {
+		if (instance->use_seqnum_jbod_fp) {
+			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+				    + sdev->id;
+			pd_sync = (void *)fusion->pd_seq_sync
+					[(instance->pd_seq_map_id - 1) & 1];
+			devhandle = pd_sync->seq[pd_index].devHandle;
+		} else
+			sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
+				" without JBOD MAP support from %s %d\n", __func__, __LINE__);
+	} else {
+		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+				+ sdev->id;
+		devhandle = device_id;
+	}
+
+	return devhandle;
+}
+
+/*
+ * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
+ * @scmd : pointer to scsi command object
+ *
+ * Returns SUCCESS if the command is aborted, else FAILED
+ */
+
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+{
+	struct megasas_instance *instance;
+	u16 smid, devhandle;
+	struct fusion_context *fusion;
+	int ret;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+
+	mr_device_priv_data = scmd->device->hostdata;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+	fusion = instance->ctrl_context;
+
+	scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
+	scsi_print_command(scmd);
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+		"SCSI host:%d\n", instance->host->host_no);
+		ret = FAILED;
+		return ret;
+	}
+
+	if (!mr_device_priv_data) {
+		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
+			"scmd(%p)\n", scmd);
+		scmd->result = DID_NO_CONNECT << 16;
+		ret = SUCCESS;
+		goto out;
+	}
+
+	if (!mr_device_priv_data->is_tm_capable) {
+		ret = FAILED;
+		goto out;
+	}
+
+	mutex_lock(&instance->reset_mutex);
+
+	smid = megasas_fusion_smid_lookup(scmd);
+
+	if (!smid) {
+		ret = SUCCESS;
+		scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
+			" issued is not found in oustanding commands\n");
+		mutex_unlock(&instance->reset_mutex);
+		goto out;
+	}
+
+	devhandle = megasas_get_tm_devhandle(scmd->device);
+
+	if (devhandle == (u16)ULONG_MAX) {
+		ret = SUCCESS;
+		sdev_printk(KERN_INFO, scmd->device,
+			"task abort issued for invalid devhandle\n");
+		mutex_unlock(&instance->reset_mutex);
+		goto out;
+	}
+	sdev_printk(KERN_INFO, scmd->device,
+		"attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+		scmd, devhandle);
+
+	mr_device_priv_data->tm_busy = 1;
+	ret = megasas_issue_tm(instance, devhandle,
+			scmd->device->channel, scmd->device->id, smid,
+			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+	mr_device_priv_data->tm_busy = 0;
+
+	mutex_unlock(&instance->reset_mutex);
+out:
+	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+			((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+	return ret;
+}
+
+/*
+ * megasas_reset_target_fusion : target reset function for fusion adapters
+ * @scmd: SCSI command pointer
+ *
+ * Returns SUCCESS if all commands associated with the target are aborted, else FAILED
+ */
+
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+{
+	struct megasas_instance *instance;
+	int ret = FAILED;
+	u16 devhandle;
+	struct fusion_context *fusion;
+	struct MR_PRIV_DEVICE *mr_device_priv_data;
+
+	mr_device_priv_data = scmd->device->hostdata;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+	fusion = instance->ctrl_context;
+
+	sdev_printk(KERN_INFO, scmd->device,
+		    "target reset called for scmd(%p)\n", scmd);
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+		"SCSI host:%d\n", instance->host->host_no);
+		ret = FAILED;
+		return ret;
+	}
+
+	if (!mr_device_priv_data) {
+		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
+			"scmd(%p)\n", scmd);
+		scmd->result = DID_NO_CONNECT << 16;
+		ret = SUCCESS;
+		goto out;
+	}
+
+	if (!mr_device_priv_data->is_tm_capable) {
+		ret = FAILED;
+		goto out;
+	}
+
+	mutex_lock(&instance->reset_mutex);
+	devhandle = megasas_get_tm_devhandle(scmd->device);
+
+	if (devhandle == (u16)ULONG_MAX) {
+		ret = SUCCESS;
+		sdev_printk(KERN_INFO, scmd->device,
+			"target reset issued for invalid devhandle\n");
+		mutex_unlock(&instance->reset_mutex);
+		goto out;
+	}
+
+	sdev_printk(KERN_INFO, scmd->device,
+		"attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+		scmd, devhandle);
+	mr_device_priv_data->tm_busy = 1;
+	ret = megasas_issue_tm(instance, devhandle,
+			scmd->device->channel, scmd->device->id, 0,
+			MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+	mr_device_priv_data->tm_busy = 0;
+	mutex_unlock(&instance->reset_mutex);
+out:
+	scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+	return ret;
+}
+
+/* SR-IOV: get the other instance in the cluster, if any */
+struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+{
+	int i;
+
+	for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
+		if (megasas_mgmt_info.instance[i] &&
+			(megasas_mgmt_info.instance[i] != instance) &&
+			 megasas_mgmt_info.instance[i]->requestorId &&
+			 megasas_mgmt_info.instance[i]->peerIsPresent &&
+			(memcmp((megasas_mgmt_info.instance[i]->clusterId),
+			instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
+			return megasas_mgmt_info.instance[i];
+	}
+	return NULL;
+}
+
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+	struct scsi_cmnd *scmd)
+{
+	struct megasas_instance *peer_instance = NULL;
+	int retval = (DID_REQUEUE << 16);
+
+	if (instance->peerIsPresent) {
+		peer_instance = megasas_get_peer_instance(instance);
+		if ((peer_instance) &&
+			(atomic_read(&peer_instance->adprecovery) ==
+			MEGASAS_HBA_OPERATIONAL))
+			retval = (DID_NO_CONNECT << 16);
+	}
+	return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
+{
+	int retval = SUCCESS, i, j, convert = 0;
+	struct megasas_instance *instance;
+	struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
+	struct fusion_context *fusion;
+	u32 abs_state, status_reg, reset_adapter;
+	u32 io_timeout_in_crash_mode = 0;
+	struct scsi_cmnd *scmd_local = NULL;
+	struct scsi_device *sdev;
+
+	instance = (struct megasas_instance *)shost->hostdata;
+	fusion = instance->ctrl_context;
+
+	mutex_lock(&instance->reset_mutex);
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+		dev_warn(&instance->pdev->dev, "Hardware critical error, "
+		       "returning FAILED for scsi%d.\n",
+			instance->host->host_no);
+		mutex_unlock(&instance->reset_mutex);
+		return FAILED;
+	}
+	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+	abs_state = status_reg & MFI_STATE_MASK;
+
+	/* IO timeout detected, forcibly put FW in FAULT state */
+	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+		instance->crash_dump_app_support && reason) {
+		dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
+			"forcibly FAULT Firmware\n");
+		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
+		status_reg = readl(&instance->reg_set->doorbell);
+		writel(status_reg | MFI_STATE_FORCE_OCR,
+			&instance->reg_set->doorbell);
+		readl(&instance->reg_set->doorbell);
+		mutex_unlock(&instance->reset_mutex);
+		do {
+			ssleep(3);
+			io_timeout_in_crash_mode++;
+			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+				"seconds for crash dump collection and OCR "
+				"to be done\n", (io_timeout_in_crash_mode * 3));
+		} while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
+			(io_timeout_in_crash_mode < 80));
+
+		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
+			dev_info(&instance->pdev->dev, "OCR done for IO "
+				"timeout case\n");
+			retval = SUCCESS;
+		} else {
+			dev_info(&instance->pdev->dev, "Controller is not "
+				"operational after 240 seconds wait for IO "
+				"timeout case in FW crash dump mode\n do "
+				"OCR/kill adapter\n");
+			retval = megasas_reset_fusion(shost, 0);
+		}
+		return retval;
+	}
+
+	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+		del_timer_sync(&instance->sriov_heartbeat_timer);
+	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+	set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
+	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
+	instance->instancet->disable_intr(instance);
+	megasas_sync_irqs((unsigned long)instance);
+
+	/* First try waiting for commands to complete */
+	if (megasas_wait_for_outstanding_fusion(instance, reason,
+						&convert)) {
+		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
+		dev_warn(&instance->pdev->dev, "resetting fusion "
+		       "adapter scsi%d.\n", instance->host->host_no);
+		if (convert)
+			reason = 0;
+
+		if (megasas_dbg_lvl & OCR_LOGS)
+			dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
+
+		/* Now return commands back to the OS */
+		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+			cmd_fusion = fusion->cmd_list[i];
+			/* check for extra commands issued by the driver */
+			if (instance->adapter_type == VENTURA_SERIES) {
+				r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
+				megasas_return_cmd_fusion(instance, r1_cmd);
+			}
+			scmd_local = cmd_fusion->scmd;
+			if (cmd_fusion->scmd) {
+				if (megasas_dbg_lvl & OCR_LOGS) {
+					sdev_printk(KERN_INFO,
+						cmd_fusion->scmd->device, "SMID: 0x%x\n",
+						cmd_fusion->index);
+					scsi_print_command(cmd_fusion->scmd);
+				}
+
+				scmd_local->result =
+					megasas_check_mpio_paths(instance,
+							scmd_local);
+				if (instance->ldio_threshold &&
+					megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+					atomic_dec(&instance->ldio_outstanding);
+				megasas_return_cmd_fusion(instance, cmd_fusion);
+				scsi_dma_unmap(scmd_local);
+				scmd_local->scsi_done(scmd_local);
+			}
+		}
+
+		atomic_set(&instance->fw_outstanding, 0);
+
+		status_reg = instance->instancet->read_fw_status_reg(
+			instance->reg_set);
+		abs_state = status_reg & MFI_STATE_MASK;
+		reset_adapter = status_reg & MFI_RESET_ADAPTER;
+		if (instance->disableOnlineCtrlReset ||
+		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+			/* Reset not supported, kill adapter */
+			dev_warn(&instance->pdev->dev, "Reset not supported"
+			       ", killing adapter scsi%d.\n",
+				instance->host->host_no);
+			megaraid_sas_kill_hba(instance);
+			instance->skip_heartbeat_timer_del = 1;
+			retval = FAILED;
+			goto out;
+		}
+
+		/* Let SR-IOV VF & PF sync up if there was a HB failure */
+		if (instance->requestorId && !reason) {
+			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+			goto transition_to_ready;
+		}
+
+		/* Now try to reset the chip */
+		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
+
+			if (instance->instancet->adp_reset
+				(instance, instance->reg_set))
+				continue;
+transition_to_ready:
+			/* Wait for FW to become ready */
+			if (megasas_transition_to_ready(instance, 1)) {
+				dev_warn(&instance->pdev->dev,
+					"Failed to transition controller to ready for "
+					"scsi%d.\n", instance->host->host_no);
+				if (instance->requestorId && !reason)
+					goto fail_kill_adapter;
+				else
+					continue;
+			}
+			megasas_reset_reply_desc(instance);
+			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
+
+			if (megasas_ioc_init_fusion(instance)) {
+				if (instance->requestorId && !reason)
+					goto fail_kill_adapter;
+				else
+					continue;
+			}
+
+			megasas_refire_mgmt_cmd(instance);
+
+			if (megasas_get_ctrl_info(instance)) {
+				dev_info(&instance->pdev->dev,
+					"Failed from %s %d\n",
+					__func__, __LINE__);
+				megaraid_sas_kill_hba(instance);
+				retval = FAILED;
+				goto out;
+			}
+			/* Reset load balance info */
+			if (fusion->load_balance_info)
+				memset(fusion->load_balance_info, 0,
+				       (sizeof(struct LD_LOAD_BALANCE_INFO) *
+				       MAX_LOGICAL_DRIVES_EXT));
+
+			if (!megasas_get_map_info(instance))
+				megasas_sync_map_info(instance);
+
+			megasas_setup_jbod_map(instance);
+
+			shost_for_each_device(sdev, shost)
+				megasas_set_dynamic_target_properties(sdev);
+
+			/* reset stream detection array */
+			if (instance->adapter_type == VENTURA_SERIES) {
+				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
+					memset(fusion->stream_detect_by_ld[j],
+					       0, sizeof(struct LD_STREAM_DETECT));
+					fusion->stream_detect_by_ld[j]->mru_bit_map
+						= MR_STREAM_BITMAP;
+				}
+			}
+
+			clear_bit(MEGASAS_FUSION_IN_RESET,
+				  &instance->reset_flags);
+			instance->instancet->enable_intr(instance);
+			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+
+			dev_info(&instance->pdev->dev, "Interrupts are enabled and"
+				" controller is OPERATIONAL for scsi:%d\n",
+				instance->host->host_no);
+
+			/* Restart SR-IOV heartbeat */
+			if (instance->requestorId) {
+				if (!megasas_sriov_start_heartbeat(instance, 0))
+					megasas_start_timer(instance,
+							    &instance->sriov_heartbeat_timer,
+							    megasas_sriov_heartbeat_handler,
+							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+				else
+					instance->skip_heartbeat_timer_del = 1;
+			}
+
+			if (instance->crash_dump_drv_support &&
+				instance->crash_dump_app_support)
+				megasas_set_crash_dump_params(instance,
+					MR_CRASH_BUF_TURN_ON);
+			else
+				megasas_set_crash_dump_params(instance,
+					MR_CRASH_BUF_TURN_OFF);
+
+			retval = SUCCESS;
+
+			/* Adapter reset completed successfully */
+			dev_warn(&instance->pdev->dev,
+				 "Reset successful for scsi%d.\n",
+				 instance->host->host_no);
+
+			goto out;
+		}
+fail_kill_adapter:
+		/* Reset failed, kill the adapter */
+		dev_warn(&instance->pdev->dev, "Reset failed, killing "
+		       "adapter scsi%d.\n", instance->host->host_no);
+		megaraid_sas_kill_hba(instance);
+		instance->skip_heartbeat_timer_del = 1;
+		retval = FAILED;
+	} else {
+		/* For VF: Restart HB timer if we didn't OCR */
+		if (instance->requestorId) {
+			megasas_start_timer(instance,
+					    &instance->sriov_heartbeat_timer,
+					    megasas_sriov_heartbeat_handler,
+					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+		}
+		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+		instance->instancet->enable_intr(instance);
+		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+	}
+out:
+	clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
+	mutex_unlock(&instance->reset_mutex);
+	return retval;
+}
+
+/* Fusion Crash dump collection work queue */
+void megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, crash_init);
+	u32 status_reg;
+	u8 partial_copy = 0;
+
+	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+	/*
+	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
+	 * to host crash buffers
+	 */
+	if (instance->drv_buf_index == 0) {
+		/* Buffer is already allocated for old Crash dump.
+		 * Do OCR and do not wait for crash dump collection
+		 */
+		if (instance->drv_buf_alloc) {
+			dev_info(&instance->pdev->dev, "earlier crash dump is "
+				"not yet copied by application, ignoring this "
+				"crash dump and initiating OCR\n");
+			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+			writel(status_reg,
+				&instance->reg_set->outbound_scratch_pad);
+			readl(&instance->reg_set->outbound_scratch_pad);
+			return;
+		}
+		megasas_alloc_host_crash_buffer(instance);
+		dev_info(&instance->pdev->dev, "Number of host crash buffers "
+			"allocated: %d\n", instance->drv_buf_alloc);
+	}
+
+	/*
+	 * If the driver has already allocated the maximum number of host
+	 * buffers and FW still has more crash dump data, the remaining
+	 * data is ignored.
+	 */
+	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+		dev_info(&instance->pdev->dev, "Driver is done copying "
+			"the buffer: %d\n", instance->drv_buf_alloc);
+		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+		partial_copy = 1;
+	} else {
+		memcpy(instance->crash_buf[instance->drv_buf_index],
+			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+		instance->drv_buf_index++;
+		status_reg &= ~MFI_STATE_DMADONE;
+	}
+
+	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+		dev_info(&instance->pdev->dev, "Crash Dump is available,number "
+			"of copied buffers: %d\n", instance->drv_buf_index);
+		instance->fw_crash_buffer_size =  instance->drv_buf_index;
+		instance->fw_crash_state = AVAILABLE;
+		instance->drv_buf_index = 0;
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+		readl(&instance->reg_set->outbound_scratch_pad);
+		if (!partial_copy)
+			megasas_reset_fusion(instance->host, 0);
+	} else {
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+		readl(&instance->reg_set->outbound_scratch_pad);
+	}
+}
+
+/* Fusion OCR work queue */
+void megasas_fusion_ocr_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, work_init);
+
+	megasas_reset_fusion(instance->host, 0);
+}
+
+/* Allocate fusion context */
+int
+megasas_alloc_fusion_context(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion;
+
+	instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
+	instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+		instance->ctrl_context_pages);
+	if (!instance->ctrl_context) {
+		/* fall back to using vmalloc for fusion_context */
+		instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
+		if (!instance->ctrl_context) {
+			dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
+			return -ENOMEM;
+		}
+	}
+
+	fusion = instance->ctrl_context;
+
+	fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+		sizeof(struct LD_LOAD_BALANCE_INFO));
+	fusion->load_balance_info =
+		(struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+		fusion->load_balance_info_pages);
+	if (!fusion->load_balance_info) {
+		fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+			sizeof(struct LD_LOAD_BALANCE_INFO));
+		if (!fusion->load_balance_info)
+			dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
+				"continuing without Load Balance support\n");
+	}
+
+	return 0;
+}
+
+void
+megasas_free_fusion_context(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	if (fusion) {
+		if (fusion->load_balance_info) {
+			if (is_vmalloc_addr(fusion->load_balance_info))
+				vfree(fusion->load_balance_info);
+			else
+				free_pages((ulong)fusion->load_balance_info,
+					fusion->load_balance_info_pages);
+		}
+
+		if (is_vmalloc_addr(fusion))
+			vfree(fusion);
+		else
+			free_pages((ulong)fusion,
+				instance->ctrl_context_pages);
+	}
+}
+
+struct megasas_instance_template megasas_instance_template_fusion = {
+	.enable_intr = megasas_enable_intr_fusion,
+	.disable_intr = megasas_disable_intr_fusion,
+	.clear_intr = megasas_clear_intr_fusion,
+	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
+	.adp_reset = megasas_adp_reset_fusion,
+	.check_reset = megasas_check_reset_fusion,
+	.service_isr = megasas_isr_fusion,
+	.tasklet = megasas_complete_cmd_dpc_fusion,
+	.init_adapter = megasas_init_adapter_fusion,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
+	.issue_dcmd = megasas_issue_dcmd_fusion,
+};
diff --git a/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.h b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 0000000..40724df
--- /dev/null
+++ b/src/kernel/linux/v4.14/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,1338 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2013  LSI Corporation
+ *  Copyright (c) 2013-2014  Avago Technologies
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *  FILE: megaraid_sas_fusion.h
+ *
+ *  Authors: Avago Technologies
+ *           Manoj Jose
+ *           Sumant Patro
+ *           Kashyap Desai <kashyap.desai@avagotech.com>
+ *           Sumit Saxena <sumit.saxena@avagotech.com>
+ *
+ *  Send feedback to: megaraidlinux.pdl@avagotech.com
+ *
+ *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
+ *  San Jose, California 95131
+ */
+
+#ifndef _MEGARAID_SAS_FUSION_H_
+#define _MEGARAID_SAS_FUSION_H_
+
+/* Fusion defines */
+#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MEGASAS_MAX_CHAIN_SHIFT			5
+#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK	0x400000
+#define MEGASAS_MAX_CHAIN_SIZE_MASK		0x3E0
+#define MEGASAS_256K_IO				128
+#define MEGASAS_1MB_IO				(MEGASAS_256K_IO * 4)
+#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST   0xF0
+#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST         0xF1
+#define MEGASAS_LOAD_BALANCE_FLAG		    0x1
+#define MEGASAS_DCMD_MBOX_PEND_FLAG		    0x1
+#define HOST_DIAG_WRITE_ENABLE			    0x80
+#define HOST_DIAG_RESET_ADAPTER			    0x4
+#define MEGASAS_FUSION_MAX_RESET_TRIES		    3
+#define MAX_MSIX_QUEUES_FUSION			    128
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA				    0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH   0x4000
+#define	MR_RL_FLAGS_GRANT_DESTINATION_CPU0	    0x00
+#define	MR_RL_FLAGS_GRANT_DESTINATION_CPU1	    0x10
+#define	MR_RL_FLAGS_GRANT_DESTINATION_CUDA	    0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE		    0x8
+#define MR_RL_WRITE_THROUGH_MODE		    0x00
+#define MR_RL_WRITE_BACK_MODE			    0x01
+
+/* T10 PI defines */
+#define MR_PROT_INFO_TYPE_CONTROLLER                0x8
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD            0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32          0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32         0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN                   0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL		    0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE	    0x60
+
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET   (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET	(0x0000006C)
+
+/*
+ * Raid context flags
+ */
+
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT   0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK    0x30
+enum MR_RAID_FLAGS_IO_SUB_TYPE {
+	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA     = 2,
+	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P        = 3,
+	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q        = 4,
+	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
+	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
+};
+
+/*
+ * Request descriptor types
+ */
+#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO           0x7
+#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA             0x1
+#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK	   0x2
+#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT      1
+
+#define MEGASAS_FP_CMD_LEN	16
+#define MEGASAS_FUSION_IN_RESET 0
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
+#define THRESHOLD_REPLY_COUNT 50
+#define RAID_1_PEER_CMDS 2
+#define JBOD_MAPS_COUNT	2
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+
+struct RAID_CONTEXT {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+	u8 nseg:4;
+	u8 type:4;
+#else
+	u8 type:4;
+	u8 nseg:4;
+#endif
+	u8 resvd0;
+	__le16 timeout_value;
+	u8 reg_lock_flags;
+	u8 resvd1;
+	__le16 virtual_disk_tgt_id;
+	__le64 reg_lock_row_lba;
+	__le32 reg_lock_length;
+	__le16 next_lmid;
+	u8 ex_status;
+	u8 status;
+	u8 raid_flags;
+	u8 num_sge;
+	__le16 config_seq_num;
+	u8 span_arm;
+	u8 priority;
+	u8 num_sge_ext;
+	u8 resvd2;
+};
+
+/*
+ * Raid Context structure which describes Ventura MegaRAID specific
+ * IO Parameters. This resides at offset 0x60 where the SGL normally
+ * starts in MPT IO Frames
+ */
+struct RAID_CONTEXT_G35 {
+	#define RAID_CONTEXT_NSEG_MASK	0x00F0
+	#define RAID_CONTEXT_NSEG_SHIFT	4
+	#define RAID_CONTEXT_TYPE_MASK	0x000F
+	#define RAID_CONTEXT_TYPE_SHIFT	0
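+	/* nseg_type (0x00 - 0x01): nseg in bits [7:4], type in bits [3:0] */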
+	u16		nseg_type;
+	u16 timeout_value; /* 0x02 -0x03 */
+	u16		routing_flags;	/* 0x04 -0x05 routing flags */
+	u16 virtual_disk_tgt_id;   /* 0x06 -0x07 */
+	u64 reg_lock_row_lba;      /* 0x08 - 0x0F */
+	u32 reg_lock_length;      /* 0x10 - 0x13 */
+	union {
+		u16 next_lmid; /* 0x14 - 0x15 */
+		u16	peer_smid;	/* used for the raid 1/10 fp writes */
+	} smid;
+	u8 ex_status;       /* 0x16 : OUT */
+	u8 status;          /* 0x17 status */
+	u8 raid_flags;		/* 0x18 resvd[7:6], ioSubType[5:4],
+				 * resvd[3:1], preferredCpu[0]
+				 */
+	u8 span_arm;            /* 0x19 span[7:5], arm[4:0] */
+	u16	config_seq_num;           /* 0x1A -0x1B */
+	union {
+		/*
+		 * Bit format:
+		 *	 ---------------------------------
+		 *	 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+		 *	 ---------------------------------
+		 * Byte0 |    numSGE[7]- numSGE[0]	 |
+		 *	 ---------------------------------
+		 * Byte1 |SD | resvd     | numSGE 8-11   |
+		 *	 ---------------------------------
+		 */
+		#define NUM_SGE_MASK_LOWER	0xFF
+		#define NUM_SGE_MASK_UPPER	0x0F
+		#define NUM_SGE_SHIFT_UPPER	8
+		#define STREAM_DETECT_SHIFT	7
+		#define STREAM_DETECT_MASK	0x80
+		struct {
+#if   defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
+			u16 stream_detected:1;
+			u16 reserved:3;
+			u16 num_sge:12;
+#else
+			u16 num_sge:12;
+			u16 reserved:3;
+			u16 stream_detected:1;
+#endif
+		} bits;
+		u8 bytes[2];
+	} u;
+	u8 resvd2[2];          /* 0x1E-0x1F */
+};
+
+#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT	1
+#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT	2
+#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT	3
+#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT	4
+#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT	5
+#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT	6
+#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT	7
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT	8
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK	0x0F00
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT	12
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK	0xF000
+
+static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
+			       u16 sge_count)
+{
+	rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
+	rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
+							& NUM_SGE_MASK_UPPER);
+}
+
+static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+	u16 sge_count;
+
+	sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
+			<< NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
+	return sge_count;
+}
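+/*
+ * Example (hypothetical caller): publish an SGE count in a Ventura RAID
+ * context and read it back:
+ *
+ *	set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
+ *	sge_count = get_num_sge(&io_request->RaidContext.raid_context_g35);
+ */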
+
+#define SET_STREAM_DETECTED(rctx_g35) \
+	(rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
+
+#define CLEAR_STREAM_DETECTED(rctx_g35) \
+	(rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
+
+static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+	return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
+}
+
+union RAID_CONTEXT_UNION {
+	struct RAID_CONTEXT raid_context;
+	struct RAID_CONTEXT_G35 raid_context_g35;
+};
+
+#define RAID_CTX_SPANARM_ARM_SHIFT	(0)
+#define RAID_CTX_SPANARM_ARM_MASK	(0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
+#define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)
+
+/* number of bits per index in U32 TrackStream */
+#define BITS_PER_INDEX_STREAM		4
+#define INVALID_STREAM_NUM              16
+#define MR_STREAM_BITMAP		0x76543210
+#define STREAM_MASK			((1 << BITS_PER_INDEX_STREAM) - 1)
+#define ZERO_LAST_STREAM		0x0fffffff
+#define MAX_STREAMS_TRACKED		8
+
+/*
+ * define region lock types
+ */
+enum REGION_TYPE {
+	REGION_TYPE_UNUSED       = 0,
+	REGION_TYPE_SHARED_READ  = 1,
+	REGION_TYPE_SHARED_WRITE = 2,
+	REGION_TYPE_EXCLUSIVE    = 3,
+};
+
+/* MPI2 defines */
+#define MPI2_FUNCTION_IOC_INIT              (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER            (0x04)
+#define MPI2_VERSION_MAJOR                  (0x02)
+#define MPI2_VERSION_MINOR                  (0x00)
+#define MPI2_VERSION_MAJOR_MASK             (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT            (8)
+#define MPI2_VERSION_MINOR_MASK             (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT            (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+		      MPI2_VERSION_MINOR)
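+/* MPI2_VERSION therefore evaluates to 0x0200 (major 2, minor 0) */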
+#define MPI2_HEADER_VERSION_UNIT            (0x10)
+#define MPI2_HEADER_VERSION_DEV             (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
+#define MPI2_HEADER_VERSION_DEV_MASK        (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT       (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+			     MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR      (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG          (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP       (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG          (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD           (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP             (0x0004)
+/* EEDP escape mode */
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE  (0x0040)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST               (0x00) /* SCSI IO */
+#define MPI2_FUNCTION_SCSI_TASK_MGMT                (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY       (0x03)
+#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO               (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO                 (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING        (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE               (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ                (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK       (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED          (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK       (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE              (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET              (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE                (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE                (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE                (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE                (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE                (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE                (0xD)
+
+struct MPI25_IEEE_SGE_CHAIN64 {
+	__le64			Address;
+	__le32			Length;
+	__le16			Reserved1;
+	u8                      NextChainOffset;
+	u8                      Flags;
+};
+
+struct MPI2_SGE_SIMPLE_UNION {
+	__le32                     FlagsLength;
+	union {
+		__le32                 Address32;
+		__le64                 Address64;
+	} u;
+};
+
+struct MPI2_SCSI_IO_CDB_EEDP32 {
+	u8                      CDB[20];                    /* 0x00 */
+	__be32			PrimaryReferenceTag;        /* 0x14 */
+	__be16			PrimaryApplicationTag;      /* 0x18 */
+	__be16			PrimaryApplicationTagMask;  /* 0x1A */
+	__le32			TransferLength;             /* 0x1C */
+};
+
+struct MPI2_SGE_CHAIN_UNION {
+	__le16			Length;
+	u8                      NextChainOffset;
+	u8                      Flags;
+	union {
+		__le32		Address32;
+		__le64		Address64;
+	} u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32 {
+	__le32			Address;
+	__le32			FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_CHAIN32 {
+	__le32			Address;
+	__le32			FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64 {
+	__le64			Address;
+	__le32			Length;
+	__le16			Reserved1;
+	u8                      Reserved2;
+	u8                      Flags;
+};
+
+struct MPI2_IEEE_SGE_CHAIN64 {
+	__le64			Address;
+	__le32			Length;
+	__le16			Reserved1;
+	u8                      Reserved2;
+	u8                      Flags;
+};
+
+union MPI2_IEEE_SGE_SIMPLE_UNION {
+	struct MPI2_IEEE_SGE_SIMPLE32  Simple32;
+	struct MPI2_IEEE_SGE_SIMPLE64  Simple64;
+};
+
+union MPI2_IEEE_SGE_CHAIN_UNION {
+	struct MPI2_IEEE_SGE_CHAIN32   Chain32;
+	struct MPI2_IEEE_SGE_CHAIN64   Chain64;
+};
+
+union MPI2_SGE_IO_UNION {
+	struct MPI2_SGE_SIMPLE_UNION       MpiSimple;
+	struct MPI2_SGE_CHAIN_UNION        MpiChain;
+	union MPI2_IEEE_SGE_SIMPLE_UNION  IeeeSimple;
+	union MPI2_IEEE_SGE_CHAIN_UNION   IeeeChain;
+};
+
+union MPI2_SCSI_IO_CDB_UNION {
+	u8                      CDB32[32];
+	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+	struct MPI2_SGE_SIMPLE_UNION SGE;
+};
+
+/****************************************************************************
+*  SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+struct MPI2_SCSI_TASK_MANAGE_REQUEST {
+	u16 DevHandle;		/*0x00 */
+	u8 ChainOffset;		/*0x02 */
+	u8 Function;		/*0x03 */
+	u8 Reserved1;		/*0x04 */
+	u8 TaskType;		/*0x05 */
+	u8 Reserved2;		/*0x06 */
+	u8 MsgFlags;		/*0x07 */
+	u8 VP_ID;		/*0x08 */
+	u8 VF_ID;		/*0x09 */
+	u16 Reserved3;		/*0x0A */
+	u8 LUN[8];		/*0x0C */
+	u32 Reserved4[7];	/*0x14 */
+	u16 TaskMID;		/*0x30 */
+	u16 Reserved5;		/*0x32 */
+};
+
+
+/*SCSI Task Management Reply Message */
+struct MPI2_SCSI_TASK_MANAGE_REPLY {
+	u16 DevHandle;		/*0x00 */
+	u8 MsgLength;		/*0x02 */
+	u8 Function;		/*0x03 */
+	u8 ResponseCode;	/*0x04 */
+	u8 TaskType;		/*0x05 */
+	u8 Reserved1;		/*0x06 */
+	u8 MsgFlags;		/*0x07 */
+	u8 VP_ID;		/*0x08 */
+	u8 VF_ID;		/*0x09 */
+	u16 Reserved2;		/*0x0A */
+	u16 Reserved3;		/*0x0C */
+	u16 IOCStatus;		/*0x0E */
+	u32 IOCLogInfo;		/*0x10 */
+	u32 TerminationCount;	/*0x14 */
+	u32 ResponseInfo;	/*0x18 */
+};
+
+struct MR_TM_REQUEST {
+	char request[128];
+};
+
+struct MR_TM_REPLY {
+	char reply[128];
+};
+
+/* SCSI Task Management Request Message */
+struct MR_TASK_MANAGE_REQUEST {
+	/* To be typecast to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
+	struct MR_TM_REQUEST         TmRequest;
+	union {
+		struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+			u32 reserved1:30;
+			u32 isTMForPD:1;
+			u32 isTMForLD:1;
+#else
+			u32 isTMForLD:1;
+			u32 isTMForPD:1;
+			u32 reserved1:30;
+#endif
+			u32 reserved2;
+		} tmReqFlags;
+		struct MR_TM_REPLY   TMReply;
+	};
+};
+
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK           (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET        (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET         (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET   (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET       (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK           (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA              (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET         (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT      (0x0A)
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE               (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME             (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED          (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED                 (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED              (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN            (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG         (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC          (0x80)
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct MPI2_RAID_SCSI_IO_REQUEST {
+	__le16			DevHandle;                      /* 0x00 */
+	u8                      ChainOffset;                    /* 0x02 */
+	u8                      Function;                       /* 0x03 */
+	__le16			Reserved1;                      /* 0x04 */
+	u8                      Reserved2;                      /* 0x06 */
+	u8                      MsgFlags;                       /* 0x07 */
+	u8                      VP_ID;                          /* 0x08 */
+	u8                      VF_ID;                          /* 0x09 */
+	__le16			Reserved3;                      /* 0x0A */
+	__le32			SenseBufferLowAddress;          /* 0x0C */
+	__le16			SGLFlags;                       /* 0x10 */
+	u8                      SenseBufferLength;              /* 0x12 */
+	u8                      Reserved4;                      /* 0x13 */
+	u8                      SGLOffset0;                     /* 0x14 */
+	u8                      SGLOffset1;                     /* 0x15 */
+	u8                      SGLOffset2;                     /* 0x16 */
+	u8                      SGLOffset3;                     /* 0x17 */
+	__le32			SkipCount;                      /* 0x18 */
+	__le32			DataLength;                     /* 0x1C */
+	__le32			BidirectionalDataLength;        /* 0x20 */
+	__le16			IoFlags;                        /* 0x24 */
+	__le16			EEDPFlags;                      /* 0x26 */
+	__le32			EEDPBlockSize;                  /* 0x28 */
+	__le32			SecondaryReferenceTag;          /* 0x2C */
+	__le16			SecondaryApplicationTag;        /* 0x30 */
+	__le16			ApplicationTagTranslationMask;  /* 0x32 */
+	u8                      LUN[8];                         /* 0x34 */
+	__le32			Control;                        /* 0x3C */
+	union MPI2_SCSI_IO_CDB_UNION  CDB;			/* 0x40 */
+	union RAID_CONTEXT_UNION RaidContext;  /* 0x60 */
+	union MPI2_SGE_IO_UNION       SGL;			/* 0x80 */
+};
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+	u32     RequestFlags:8;
+	u32     MessageAddress1:24;
+	u32     MessageAddress2;
+};
+
+/* Default Request Descriptor */
+struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		LMID;                       /* 0x04 */
+	__le16		DescriptorTypeDependent;    /* 0x06 */
+};
+
+/* High Priority Request Descriptor */
+struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		LMID;                       /* 0x04 */
+	__le16		Reserved1;                  /* 0x06 */
+};
+
+/* SCSI IO Request Descriptor */
+struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		LMID;                       /* 0x04 */
+	__le16		DevHandle;                  /* 0x06 */
+};
+
+/* SCSI Target Request Descriptor */
+struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		LMID;                       /* 0x04 */
+	__le16		IoIndex;                    /* 0x06 */
+};
+
+/* RAID Accelerator Request Descriptor */
+struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		LMID;                       /* 0x04 */
+	__le16		Reserved;                   /* 0x06 */
+};
+
+/* union of Request Descriptors */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION {
+	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR             Default;
+	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR       HighPriority;
+	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR             SCSIIO;
+	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR         SCSITarget;
+	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR          RAIDAccelerator;
+	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR      MFAIo;
+	union {
+		struct {
+			__le32 low;
+			__le32 high;
+		} u;
+		__le64 Words;
+	};
+};
+
+/* Default Reply Descriptor */
+struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		DescriptorTypeDependent1;   /* 0x02 */
+	__le32		DescriptorTypeDependent2;   /* 0x04 */
+};
+
+/* Address Reply Descriptor */
+struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le32		ReplyFrameAddress;          /* 0x04 */
+};
+
+/* SCSI IO Success Reply Descriptor */
+struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le16		TaskTag;                    /* 0x04 */
+	__le16		Reserved1;                  /* 0x06 */
+};
+
+/* TargetAssist Success Reply Descriptor */
+struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	u8              SequenceNumber;             /* 0x04 */
+	u8              Reserved1;                  /* 0x05 */
+	__le16		IoIndex;                    /* 0x06 */
+};
+
+/* Target Command Buffer Reply Descriptor */
+struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u8              VP_ID;                      /* 0x02 */
+	u8              Flags;                      /* 0x03 */
+	__le16		InitiatorDevHandle;         /* 0x04 */
+	__le16		IoIndex;                    /* 0x06 */
+};
+
+/* RAID Accelerator Success Reply Descriptor */
+struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	__le16		SMID;                       /* 0x02 */
+	__le32		Reserved;                   /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union MPI2_REPLY_DESCRIPTORS_UNION {
+	struct MPI2_DEFAULT_REPLY_DESCRIPTOR                   Default;
+	struct MPI2_ADDRESS_REPLY_DESCRIPTOR                   AddressReply;
+	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR           SCSIIOSuccess;
+	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+	RAIDAcceleratorSuccess;
+	__le64                                             Words;
+};
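+
+/*
+ * Which reply member is valid is encoded in the low bits of ReplyFlags
+ * (see the MPI2_RPY_DESCRIPT_FLAGS_* defines); an entry whose Words are
+ * all ones marks an unused slot in the reply descriptor post queue.
+ */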
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST {
+	u8                      WhoInit;                        /* 0x00 */
+	u8                      Reserved1;                      /* 0x01 */
+	u8                      ChainOffset;                    /* 0x02 */
+	u8                      Function;                       /* 0x03 */
+	__le16			Reserved2;                      /* 0x04 */
+	u8                      Reserved3;                      /* 0x06 */
+	u8                      MsgFlags;                       /* 0x07 */
+	u8                      VP_ID;                          /* 0x08 */
+	u8                      VF_ID;                          /* 0x09 */
+	__le16			Reserved4;                      /* 0x0A */
+	__le16			MsgVersion;                     /* 0x0C */
+	__le16			HeaderVersion;                  /* 0x0E */
+	u32                     Reserved5;                      /* 0x10 */
+	__le16			Reserved6;                      /* 0x14 */
+	u8                      HostPageSize;                   /* 0x16 */
+	u8                      HostMSIxVectors;                /* 0x17 */
+	__le16			Reserved8;                      /* 0x18 */
+	__le16			SystemRequestFrameSize;         /* 0x1A */
+	__le16			ReplyDescriptorPostQueueDepth;  /* 0x1C */
+	__le16			ReplyFreeQueueDepth;            /* 0x1E */
+	__le32			SenseBufferAddressHigh;         /* 0x20 */
+	__le32			SystemReplyAddressHigh;         /* 0x24 */
+	__le64			SystemRequestFrameBaseAddress;  /* 0x28 */
+	__le64			ReplyDescriptorPostQueueAddress;/* 0x30 */
+	__le64			ReplyFreeQueueAddress;          /* 0x38 */
+	__le64			TimeStamp;                      /* 0x40 */
+};
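+
+/*
+ * IOCInit hands the firmware the geometry negotiated at bring-up: the
+ * base addresses and depths of the request frame pool, the reply
+ * descriptor post queue and the reply free queue. It is built and sent
+ * once per controller initialization (see megasas_ioc_init_fusion()
+ * declared at the end of this header).
+ */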
+
+/* mrpriv defines */
+#define MR_PD_INVALID 0xFFFF
+#define MR_DEVHANDLE_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH	MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
+#define MAX_LOGICAL_DRIVES_DYN 512
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT	256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
+#define MAX_API_ARRAYS_DYN 512
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
+#define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
+#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO      0x0200e102
+#define MR_DCMD_DRV_GET_TARGET_PROP         0x0200e103
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc */
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200
+
+struct MR_DEV_HANDLE_INFO {
+	__le16	curDevHdl;
+	u8      validHandles;
+	u8      interfaceType;
+	__le16	devHandle[2];
+};
+
+struct MR_ARRAY_INFO {
+	__le16	pd[MAX_RAIDMAP_ROW_SIZE];
+};
+
+struct MR_QUAD_ELEMENT {
+	__le64     logStart;
+	__le64     logEnd;
+	__le64     offsetInSpan;
+	__le32     diff;
+	__le32     reserved1;
+};
+
+struct MR_SPAN_INFO {
+	__le32             noElements;
+	__le32             reserved1;
+	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_LD_SPAN {
+	__le64	 startBlk;
+	__le64	 numBlks;
+	__le16	 arrayRef;
+	u8       spanRowSize;
+	u8       spanRowDataSize;
+	u8       reserved[4];
+};
+
+struct MR_SPAN_BLOCK_INFO {
+	__le64          num_rows;
+	struct MR_LD_SPAN   span;
+	struct MR_SPAN_INFO block_span_info;
+};
+
+#define MR_RAID_CTX_CPUSEL_0		0
+#define MR_RAID_CTX_CPUSEL_1		1
+#define MR_RAID_CTX_CPUSEL_2		2
+#define MR_RAID_CTX_CPUSEL_3		3
+#define MR_RAID_CTX_CPUSEL_FCFS		0xF
+
+struct MR_CPU_AFFINITY_MASK {
+	union {
+		struct {
+#ifndef MFI_BIG_ENDIAN
+		u8 hw_path:1;
+		u8 cpu0:1;
+		u8 cpu1:1;
+		u8 cpu2:1;
+		u8 cpu3:1;
+		u8 reserved:3;
+#else
+		u8 reserved:3;
+		u8 cpu3:1;
+		u8 cpu2:1;
+		u8 cpu1:1;
+		u8 cpu0:1;
+		u8 hw_path:1;
+#endif
+		};
+		u8 core_mask;
+	};
+};
+
+struct MR_IO_AFFINITY {
+	union {
+		struct {
+			struct MR_CPU_AFFINITY_MASK pdRead;
+			struct MR_CPU_AFFINITY_MASK pdWrite;
+			struct MR_CPU_AFFINITY_MASK ldRead;
+			struct MR_CPU_AFFINITY_MASK ldWrite;
+			};
+		u32 word;
+		};
+	u8 maxCores;    /* Total cores + HW Path in ROC */
+	u8 reserved[3];
+};
+
+struct MR_LD_RAID {
+	struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u32 reserved4:2;
+		u32 fp_cache_bypass_capable:1;
+		u32 fp_rmw_capable:1;
+		u32 disable_coalescing:1;
+		u32     fpBypassRegionLock:1;
+		u32     tmCapable:1;
+		u32	fpNonRWCapable:1;
+		u32     fpReadAcrossStripe:1;
+		u32     fpWriteAcrossStripe:1;
+		u32     fpReadCapable:1;
+		u32     fpWriteCapable:1;
+		u32     encryptionType:8;
+		u32     pdPiMode:4;
+		u32     ldPiMode:4;
+		u32 reserved5:2;
+		u32 ra_capable:1;
+		u32     fpCapable:1;
+#else
+		u32     fpCapable:1;
+		u32 ra_capable:1;
+		u32 reserved5:2;
+		u32     ldPiMode:4;
+		u32     pdPiMode:4;
+		u32     encryptionType:8;
+		u32     fpWriteCapable:1;
+		u32     fpReadCapable:1;
+		u32     fpWriteAcrossStripe:1;
+		u32     fpReadAcrossStripe:1;
+		u32	fpNonRWCapable:1;
+		u32     tmCapable:1;
+		u32     fpBypassRegionLock:1;
+		u32 disable_coalescing:1;
+		u32 fp_rmw_capable:1;
+		u32 fp_cache_bypass_capable:1;
+		u32 reserved4:2;
+#endif
+	} capability;
+	__le32     reserved6;
+	__le64     size;
+	u8      spanDepth;
+	u8      level;
+	u8      stripeShift;
+	u8      rowSize;
+	u8      rowDataSize;
+	u8      writeMode;
+	u8      PRL;
+	u8      SRL;
+	__le16     targetId;
+	u8      ldState;
+	u8      regTypeReqOnWrite;
+	u8      modFactor;
+	u8	regTypeReqOnRead;
+	__le16     seqNum;
+
+	struct {
+		u32 ldSyncRequired:1;
+		u32 reserved:31;
+	} flags;
+
+	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IOs */
+	u8	fpIoTimeoutForLd;/* 0x2C timeout value used by driver in FP IOs */
+	/* 0x2D This LD accepts priority boosts of this type */
+	u8 ld_accept_priority_type;
+	u8 reserved2[2];	        /* 0x2E - 0x2F */
+	/* 0x30 - 0x33, Logical block size for the LD */
+	u32 logical_block_length;
+	struct {
+#ifndef MFI_BIG_ENDIAN
+	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+	u32 ld_pi_exp:4;
+	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
+	 *  BLOCK EXPONENT from READ CAPACITY 16
+	 */
+	u32 ld_logical_block_exp:4;
+	u32 reserved1:24;           /* 0x34 */
+#else
+	u32 reserved1:24;           /* 0x34 */
+	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
+	 *  BLOCK EXPONENT from READ CAPACITY 16
+	 */
+	u32 ld_logical_block_exp:4;
+	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+	u32 ld_pi_exp:4;
+#endif
+	};                               /* 0x34 - 0x37 */
+	 /* 0x38 - 0x3f, This will determine which
+	  *  core will process LD IO and PD IO.
+	  */
+	struct MR_IO_AFFINITY cpuAffinity;
+     /* Bit definitions are specified by MR_IO_AFFINITY */
+	u8 reserved3[0x80 - 0x40];    /* 0x40 - 0x7f */
+};
+
+struct MR_LD_SPAN_MAP {
+	struct MR_LD_RAID          ldRaid;
+	u8                  dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+	struct MR_SPAN_BLOCK_INFO  spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_FW_RAID_MAP {
+	__le32                 totalSize;
+	union {
+		struct {
+			__le32         maxLd;
+			__le32         maxSpanDepth;
+			__le32         maxRowSize;
+			__le32         maxPdCount;
+			__le32         maxArrays;
+		} validationInfo;
+		__le32             version[5];
+	};
+
+	__le32                 ldCount;
+	__le32                 Reserved1;
+	u8                  ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
+					MAX_RAIDMAP_VIEWS];
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+	struct MR_ARRAY_INFO       arMapInfo[MAX_RAIDMAP_ARRAYS];
+	struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+	struct MR_LD_SPAN_MAP      ldSpanMap[1];
+};
+
+struct IO_REQUEST_INFO {
+	u64 ldStartBlock;
+	u32 numBlocks;
+	u16 ldTgtId;
+	u8 isRead;
+	__le16 devHandle;
+	u8 pd_interface;
+	u64 pdBlock;
+	u8 fpOkForIo;
+	u8 IoforUnevenSpan;
+	u8 start_span;
+	u8 do_fp_rlbypass;
+	u64 start_row;
+	u8  span_arm;	/* span[7:5], arm[4:0] */
+	u8  pd_after_lb;
+	u16 r1_alt_dev_handle; /* raid 1/10 only */
+	bool ra_capable;
+};
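+
+/*
+ * span_arm above packs both coordinates of the chosen arm: span number
+ * in bits 7:5 and arm index in bits 4:0, so span = (span_arm >> 5) and
+ * arm = (span_arm & 0x1f).
+ */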
+
+struct MR_LD_TARGET_SYNC {
+	u8  targetId;
+	u8  reserved;
+	__le16 seqNum;
+};
+
+/*
+ * RAID map descriptor types.
+ * Each element should uniquely identify one data structure in the RAID map.
+ */
+enum MR_RAID_MAP_DESC_TYPE {
+	/* MR_DEV_HANDLE_INFO data */
+	RAID_MAP_DESC_TYPE_DEVHDL_INFO    = 0x0,
+	/* target ID to LD number index map */
+	RAID_MAP_DESC_TYPE_TGTID_INFO     = 0x1,
+	/* MR_ARRAY_INFO data */
+	RAID_MAP_DESC_TYPE_ARRAY_INFO     = 0x2,
+	/* MR_LD_SPAN_MAP data */
+	RAID_MAP_DESC_TYPE_SPAN_INFO      = 0x3,
+	RAID_MAP_DESC_TYPE_COUNT,
+};
+
+/*
+ * This table defines the offset, size, and number of elements of each
+ * descriptor type in the RAID map buffer.
+ */
+struct MR_RAID_MAP_DESC_TABLE {
+	/* Raid map descriptor type */
+	u32 raid_map_desc_type;
+	/* Offset into the RAID map buffer where
+	 *  descriptor data is saved
+	 */
+	u32 raid_map_desc_offset;
+	/* total size of the
+	 * descriptor buffer
+	 */
+	u32 raid_map_desc_buffer_size;
+	/* Number of elements contained in the
+	 *  descriptor buffer
+	 */
+	u32 raid_map_desc_elements;
+};
+
+/*
+ * Dynamic RAID map structure.
+ */
+struct MR_FW_RAID_MAP_DYNAMIC {
+	u32 raid_map_size;   /* total size of RAID Map structure */
+	u32 desc_table_offset;/* Offset of desc table into RAID map */
+	u32 desc_table_size;  /* Total Size of desc table */
+	/* Total Number of elements in the desc table */
+	u32 desc_table_num_elements;
+	u64	reserved1;
+	u32	reserved2[3];	/*future use */
+	/* timeout value used by driver in FP IOs */
+	u8 fp_pd_io_timeout_sec;
+	u8 reserved3[3];
+	/* when this seqNum increments, the driver needs to
+	 * release RMW buffers as soon as possible
+	 */
+	u32 rmw_fp_seq_num;
+	u16 ld_count;	/* count of LDs */
+	u16 ar_count;   /* count of arrays */
+	u16 span_count; /* count of spans */
+	u16 reserved4[3];
+/*
+ * The below structure of pointers is only to be used by the driver.
+ * It is added to the API to reduce the amount of code change needed in
+ * the driver to support the dynamic RAID map. Firmware should not
+ * update these pointers while preparing the RAID map.
+ */
+	union {
+		struct {
+			struct MR_DEV_HANDLE_INFO  *dev_hndl_info;
+			u16 *ld_tgt_id_to_ld;
+			struct MR_ARRAY_INFO *ar_map_info;
+			struct MR_LD_SPAN_MAP *ld_span_map;
+			};
+		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
+		};
+/*
+ * RAID Map descriptor table defines the layout of data in the RAID Map.
+ * The size of the descriptor table itself could change.
+ */
+	/* Variable Size descriptor Table. */
+	struct MR_RAID_MAP_DESC_TABLE
+			raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
+	/* Variable Size buffer containing all data */
+	u32 raid_map_desc_data[1];
+}; /* Dynamically sized RAID map structure */
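+
+/*
+ * Locating a descriptor's payload in the dynamic map means walking the
+ * descriptor table; both offsets are relative to the start of the map.
+ * A sketch of the walk (not the driver's exact code):
+ *
+ *	struct MR_RAID_MAP_DESC_TABLE *desc =
+ *		(void *)map + map->desc_table_offset;
+ *	for (i = 0; i < map->desc_table_num_elements; i++, desc++) {
+ *		void *data = (void *)map + desc->raid_map_desc_offset;
+ *		... dispatch on desc->raid_map_desc_type ...
+ *	}
+ */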
+
+#define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR          (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR       (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT        (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST          (0x40)
+
+#define MPI2_SGE_FLAGS_SHIFT                (0x02)
+#define IEEE_SGE_FLAGS_FORMAT_MASK          (0xC0)
+#define IEEE_SGE_FLAGS_FORMAT_IEEE          (0x00)
+#define IEEE_SGE_FLAGS_FORMAT_NVME          (0x02)
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK           (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE       (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP       (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL       (0x10)
+
+struct megasas_register_set;
+struct megasas_instance;
+
+union desc_word {
+	u64 word;
+	struct {
+		u32 low;
+		u32 high;
+	} u;
+};
+
+struct megasas_cmd_fusion {
+	struct MPI2_RAID_SCSI_IO_REQUEST	*io_request;
+	dma_addr_t			io_request_phys_addr;
+
+	union MPI2_SGE_IO_UNION	*sg_frame;
+	dma_addr_t		sg_frame_phys_addr;
+
+	u8 *sense;
+	dma_addr_t sense_phys_addr;
+
+	struct list_head list;
+	struct scsi_cmnd *scmd;
+	struct megasas_instance *instance;
+
+	u8 retry_for_fw_reset;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION  *request_desc;
+
+	/*
+	 * Context for an MFI frame.
+	 * Used to get the MFI cmd from the list when an MFI cmd completes.
+	 */
+	u32 sync_cmd_idx;
+	u32 index;
+	u8 pd_r1_lb;
+	struct completion done;
+	u8 pd_interface;
+	u16 r1_alt_dev_handle; /* raid 1/10 only*/
+	bool cmd_completed;  /* raid 1/10 fp writes status holder */
+};
+
+struct LD_LOAD_BALANCE_INFO {
+	u8	loadBalanceFlag;
+	u8	reserved1;
+	atomic_t     scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+	u64     last_accessed_block[MAX_PHYSICAL_DEVICES];
+};
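+
+/*
+ * Per-LD bookkeeping for fast-path load balancing: outstanding-command
+ * counts and the last block touched per physical device let the driver
+ * steer e.g. RAID 1 reads to the less busy or nearer arm (see pd_r1_lb
+ * and r1_alt_dev_handle in struct megasas_cmd_fusion above).
+ */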
+
+/* SPAN_SET is info calculated from the span info in the RAID map, per LD */
+typedef struct _LD_SPAN_SET {
+	u64  log_start_lba;
+	u64  log_end_lba;
+	u64  span_row_start;
+	u64  span_row_end;
+	u64  data_strip_start;
+	u64  data_strip_end;
+	u64  data_row_start;
+	u64  data_row_end;
+	u8   strip_offset[MAX_SPAN_DEPTH];
+	u32    span_row_data_width;
+	u32    diff;
+	u32    reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+	LD_SPAN_SET  span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
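+
+/*
+ * One LD_SPAN_SET per span is precomputed from the firmware RAID map so
+ * that row/strip lookups on LDs with unevenly sized spans (see
+ * IoforUnevenSpan in struct IO_REQUEST_INFO) need not re-derive the
+ * span geometry on every IO.
+ */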
+
+struct MR_FW_RAID_MAP_ALL {
+	struct MR_FW_RAID_MAP raidMap;
+	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} __attribute__ ((packed));
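+
+/*
+ * MR_FW_RAID_MAP ends in a one-element ldSpanMap[] tail, so this
+ * wrapper appends MAX_LOGICAL_DRIVES - 1 further entries; the result
+ * can be indexed as raidMap.ldSpanMap[0..MAX_LOGICAL_DRIVES-1].
+ */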
+
+struct MR_DRV_RAID_MAP {
+	/* total size of this structure, including this field.
+	 * This field is manipulated by the driver for the ext RAID map;
+	 * otherwise the value is taken from the firmware RAID map.
+	 */
+	__le32                 totalSize;
+
+	union {
+	struct {
+		__le32         maxLd;
+		__le32         maxSpanDepth;
+		__le32         maxRowSize;
+		__le32         maxPdCount;
+		__le32         maxArrays;
+	} validationInfo;
+	__le32             version[5];
+	};
+
+	/* timeout value used by driver in FP IOs */
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+
+	__le16                 ldCount;
+	__le16                 arCount;
+	__le16                 spanCount;
+	__le16                 reserve3;
+
+	struct MR_DEV_HANDLE_INFO
+		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
+	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
+	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
+	struct MR_LD_SPAN_MAP      ldSpanMap[1];
+};
+
+/* The driver RAID map size is the same as the ext RAID map.
+ * MR_DRV_RAID_MAP_ALL is created to stay in sync with the old RAID map
+ * and exists mainly for code reuse.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+	struct MR_DRV_RAID_MAP raidMap;
+	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
+} __packed;
+
+struct MR_FW_RAID_MAP_EXT {
+	/* Not used in the new map */
+	u32                 reserved;
+
+	union {
+	struct {
+		u32         maxLd;
+		u32         maxSpanDepth;
+		u32         maxRowSize;
+		u32         maxPdCount;
+		u32         maxArrays;
+	} validationInfo;
+	u32             version[5];
+	};
+
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+
+	__le16                 ldCount;
+	__le16                 arCount;
+	__le16                 spanCount;
+	__le16                 reserve3;
+
+	struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+	u8                  ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+	struct MR_ARRAY_INFO       arMapInfo[MAX_API_ARRAYS_EXT];
+	struct MR_LD_SPAN_MAP      ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
+/*
+ * define MR_PD_CFG_SEQ structure for system PDs
+ */
+struct MR_PD_CFG_SEQ {
+	u16 seqNum;
+	u16 devHandle;
+	struct {
+#if   defined(__BIG_ENDIAN_BITFIELD)
+		u8     reserved:7;
+		u8     tmCapable:1;
+#else
+		u8     tmCapable:1;
+		u8     reserved:7;
+#endif
+	} capability;
+	u8  reserved;
+	u16 pd_target_id;
+} __packed;
+
+struct MR_PD_CFG_SEQ_NUM_SYNC {
+	__le32 size;
+	__le32 count;
+	struct MR_PD_CFG_SEQ seq[1];
+} __packed;
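+
+/*
+ * Same trailing-[1] sizing idiom as the RAID maps: 'count' gives the
+ * number of valid seq[] entries and 'size' the bytes filled in by
+ * firmware; the buffer is refreshed via the
+ * MR_DCMD_SYSTEM_PD_MAP_GET_INFO DCMD defined above.
+ */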
+
+/* stream detection */
+struct STREAM_DETECT {
+	u64 next_seq_lba; /* next LBA to match sequential access */
+	struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
+	struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
+	u32 count_cmds_in_stream; /* count of host commands in this stream */
+	u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
+	u8 is_read; /* SCSI OpCode for this stream */
+	u8 group_depth; /* total number of host commands in group */
+	/* TRUE if no more commands can be added to this group */
+	bool group_flush;
+	u8 reserved[7]; /* pad to 64-bit alignment */
+};
+
+struct LD_STREAM_DETECT {
+	bool write_back; /* TRUE if WB, FALSE if WT */
+	bool fp_write_enabled;
+	bool members_ssds;
+	bool fp_cache_bypass_capable;
+	u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
+	/* this is the array of stream detect structures (one per stream) */
+	struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
+};
+
+struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+	u64 RDPQBaseAddress;
+	u32 Reserved1;
+	u32 Reserved2;
+};
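+
+/*
+ * In RDPQ mode IOCInit points at an array of these entries, one reply
+ * descriptor post queue base address per MSI-x vector, instead of the
+ * single ReplyDescriptorPostQueueAddress field (see rdpq_virt/rdpq_phys
+ * in struct fusion_context below).
+ */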
+
+struct fusion_context {
+	struct megasas_cmd_fusion **cmd_list;
+	dma_addr_t req_frames_desc_phys;
+	u8 *req_frames_desc;
+
+	struct dma_pool *io_request_frames_pool;
+	dma_addr_t io_request_frames_phys;
+	u8 *io_request_frames;
+
+	struct dma_pool *sg_dma_pool;
+	struct dma_pool *sense_dma_pool;
+
+	dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
+	struct dma_pool *reply_frames_desc_pool;
+
+	u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
+
+	u32 reply_q_depth;
+	u32 request_alloc_sz;
+	u32 reply_alloc_sz;
+	u32 io_frames_alloc_sz;
+
+	struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
+	dma_addr_t rdpq_phys;
+	u16	max_sge_in_main_msg;
+	u16	max_sge_in_chain;
+
+	u8	chain_offset_io_request;
+	u8	chain_offset_mfi_pthru;
+
+	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
+	dma_addr_t ld_map_phys[2];
+
+	/* Non dma-able memory. Driver local copy. */
+	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+	u32 max_map_sz;
+	u32 current_map_sz;
+	u32 old_map_sz;
+	u32 new_map_sz;
+	u32 drv_map_sz;
+	u32 drv_map_pages;
+	struct MR_PD_CFG_SEQ_NUM_SYNC	*pd_seq_sync[JBOD_MAPS_COUNT];
+	dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
+	u8 fast_path_io;
+	struct LD_LOAD_BALANCE_INFO *load_balance_info;
+	u32 load_balance_info_pages;
+	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
+	struct LD_STREAM_DETECT **stream_detect_by_ld;
+};
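+
+/*
+ * fusion_context is the per-adapter anchor for everything above: the
+ * command pool, request and reply rings, the firmware (ld_map[]) and
+ * driver-local (ld_drv_map[]) RAID maps, the JBOD sequence maps and the
+ * stream detectors. The two-element map arrays are double buffers: one
+ * copy serves IO while the other is being refreshed.
+ */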
+
+union desc_value {
+	__le64 word;
+	struct {
+		__le32 low;
+		__le32 high;
+	} u;
+};
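+
+/*
+ * desc_value is the __le-annotated counterpart of desc_word above, for
+ * descriptors already laid out in controller (little-endian) byte
+ * order.
+ */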
+
+void megasas_free_cmds_fusion(struct megasas_instance *instance);
+int megasas_ioc_init_fusion(struct megasas_instance *instance);
+u8 megasas_get_map_info(struct megasas_instance *instance);
+int megasas_sync_map_info(struct megasas_instance *instance);
+void megasas_release_fusion(struct megasas_instance *instance);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+			      struct scsi_cmnd *scmd);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+
+#endif /* _MEGARAID_SAS_FUSION_H_ */