[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/Kconfig b/ap/os/linux/linux-3.4.x/drivers/target/Kconfig
new file mode 100644
index 0000000..b28794b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/Kconfig
@@ -0,0 +1,36 @@
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on SCSI && BLOCK
+	select CONFIGFS_FS
+	default n
+	help
+	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
+	control path for target_core_mod.  This includes built-in TCM RAMDISK
+	subsystem logic for virtual LUN 0 access
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	help
+	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	access to Linux/Block devices using BIO
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	access to Linux/VFS struct file or struct block_device
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	help
+	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	passthrough access to Linux/SCSI device
+
+source "drivers/target/loopback/Kconfig"
+source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
+
+endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/Makefile b/ap/os/linux/linux-3.4.x/drivers/target/Makefile
new file mode 100644
index 0000000..62e5405
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/Makefile
@@ -0,0 +1,27 @@
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_cdb.o \
+				   target_core_ua.o \
+				   target_core_rd.o \
+				   target_core_stat.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
+
+# Fabric modules
+obj-$(CONFIG_LOOPBACK_TARGET)	+= loopback/
+obj-$(CONFIG_TCM_FC)		+= tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Kconfig b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Kconfig
new file mode 100644
index 0000000..8345fb4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Kconfig
@@ -0,0 +1,9 @@
+config ISCSI_TARGET
+	tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+	depends on NET
+	select CRYPTO
+	select CRYPTO_CRC32C
+	select CRYPTO_CRC32C_INTEL if X86
+	help
+	Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+	Target Mode Stack.
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Makefile b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Makefile
new file mode 100644
index 0000000..5b9a2cf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
+iscsi_target_mod-y +=		iscsi_target_parameters.o \
+				iscsi_target_seq_pdu_list.o \
+				iscsi_target_tq.o \
+				iscsi_target_auth.o \
+				iscsi_target_datain_values.o \
+				iscsi_target_device.o \
+				iscsi_target_erl0.o \
+				iscsi_target_erl1.o \
+				iscsi_target_erl2.o \
+				iscsi_target_login.o \
+				iscsi_target_nego.o \
+				iscsi_target_nodeattrib.o \
+				iscsi_target_tmr.o \
+				iscsi_target_tpg.o \
+				iscsi_target_util.o \
+				iscsi_target.o \
+				iscsi_target_configfs.o \
+				iscsi_target_stat.o
+
+obj-$(CONFIG_ISCSI_TARGET)	+= iscsi_target_mod.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 0000000..56d02e0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4544 @@
+/*******************************************************************************
+ * This file contains main functions related to the iSCSI Target Core Driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_configfs.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_stat.h"
+
+static LIST_HEAD(g_tiqn_list);
+static LIST_HEAD(g_np_list);
+static DEFINE_SPINLOCK(tiqn_lock);
+static DEFINE_MUTEX(np_lock);
+
+static struct idr tiqn_idr;
+struct idr sess_idr;
+struct mutex auth_id_lock;
+spinlock_t sess_idr_lock;
+
+struct iscsit_global *iscsit_global;
+
+struct kmem_cache *lio_cmd_cache;
+struct kmem_cache *lio_qr_cache;
+struct kmem_cache *lio_dr_cache;
+struct kmem_cache *lio_ooo_cache;
+struct kmem_cache *lio_r2t_cache;
+
+static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+			unsigned char *buf, u32);
+static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+
+struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
+{
+	struct iscsi_tiqn *tiqn = NULL;
+
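+	/*
+	 * Scan the global tiqn list for a matching, active target name and
+	 * take a reference via tiqn_access_count so it cannot be freed
+	 * while the login uses it; dropped by iscsit_put_tiqn_for_login().
+	 */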
+	spin_lock(&tiqn_lock);
+	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+		if (!strcmp(tiqn->tiqn, buf)) {
+
+			spin_lock(&tiqn->tiqn_state_lock);
+			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+				tiqn->tiqn_access_count++;
+				spin_unlock(&tiqn->tiqn_state_lock);
+				spin_unlock(&tiqn_lock);
+				return tiqn;
+			}
+			spin_unlock(&tiqn->tiqn_state_lock);
+		}
+	}
+	spin_unlock(&tiqn_lock);
+
+	return NULL;
+}
+
+static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
+{
+	spin_lock(&tiqn->tiqn_state_lock);
+	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
+		spin_unlock(&tiqn->tiqn_state_lock);
+		return 0;
+	}
+	spin_unlock(&tiqn->tiqn_state_lock);
+
+	return -1;
+}
+
+void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
+{
+	spin_lock(&tiqn->tiqn_state_lock);
+	tiqn->tiqn_access_count--;
+	spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+/*
+ * Note that IQN formatting is expected to be done in userspace, and
+ * no explicit IQN format checks are done here.
+ */
+struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
+{
+	struct iscsi_tiqn *tiqn = NULL;
+	int ret;
+
+	if (strlen(buf) >= ISCSI_IQN_LEN) {
+		pr_err("Target IQN exceeds %d bytes\n",
+				ISCSI_IQN_LEN);
+		return ERR_PTR(-EINVAL);
+	}
+
+	tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
+	if (!tiqn) {
+		pr_err("Unable to allocate struct iscsi_tiqn\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sprintf(tiqn->tiqn, "%s", buf);
+	INIT_LIST_HEAD(&tiqn->tiqn_list);
+	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
+	spin_lock_init(&tiqn->tiqn_state_lock);
+	spin_lock_init(&tiqn->tiqn_tpg_lock);
+	spin_lock_init(&tiqn->sess_err_stats.lock);
+	spin_lock_init(&tiqn->login_stats.lock);
+	spin_lock_init(&tiqn->logout_stats.lock);
+
+	if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
+		pr_err("idr_pre_get() for tiqn_idr failed\n");
+		kfree(tiqn);
+		return ERR_PTR(-ENOMEM);
+	}
+	tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+
+	spin_lock(&tiqn_lock);
+	ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+	if (ret < 0) {
+		pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+		spin_unlock(&tiqn_lock);
+		kfree(tiqn);
+		return ERR_PTR(ret);
+	}
+	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+	spin_unlock(&tiqn_lock);
+
+	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
+
+	return tiqn;
+
+}
+
+static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
+{
+	/*
+	 * Wait for accesses to said struct iscsi_tiqn to end.
+	 */
+	spin_lock(&tiqn->tiqn_state_lock);
+	while (tiqn->tiqn_access_count != 0) {
+		spin_unlock(&tiqn->tiqn_state_lock);
+		msleep(10);
+		spin_lock(&tiqn->tiqn_state_lock);
+	}
+	spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
+{
+	/*
+	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
+	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
+	 * attempts to access this struct iscsi_tiqn will fail from both transport
+	 * fabric and control code paths.
+	 */
+	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
+		pr_err("iscsit_set_tiqn_shutdown() failed\n");
+		return;
+	}
+
+	iscsit_wait_for_tiqn(tiqn);
+
+	spin_lock(&tiqn_lock);
+	list_del(&tiqn->tiqn_list);
+	idr_remove(&tiqn_idr, tiqn->tiqn_index);
+	spin_unlock(&tiqn_lock);
+
+	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
+			tiqn->tiqn);
+	kfree(tiqn);
+}
+
+int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+	int ret;
+	/*
+	 * Determine if the network portal is accepting storage traffic.
+	 */
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return -1;
+	}
+	if (np->np_login_tpg) {
+		pr_err("np->np_login_tpg() is not NULL!\n");
+		spin_unlock_bh(&np->np_thread_lock);
+		return -1;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+	/*
+	 * Determine if the portal group is accepting storage traffic.
+	 */
+	spin_lock_bh(&tpg->tpg_state_lock);
+	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+		spin_unlock_bh(&tpg->tpg_state_lock);
+		return -1;
+	}
+	spin_unlock_bh(&tpg->tpg_state_lock);
+
+	/*
+	 * Here we serialize access across the TIQN+TPG Tuple.
+	 */
+	ret = mutex_lock_interruptible(&tpg->np_login_lock);
+	if ((ret != 0) || signal_pending(current))
+		return -1;
+
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_login_tpg = tpg;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	return 0;
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_login_tpg = NULL;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	mutex_unlock(&tpg->np_login_lock);
+
+	if (tiqn)
+		iscsit_put_tiqn_for_login(tiqn);
+
+	return 0;
+}
+
+/*
+ * Called with mutex np_lock held
+ */
+static struct iscsi_np *iscsit_get_np(
+	struct __kernel_sockaddr_storage *sockaddr,
+	int network_transport)
+{
+	struct sockaddr_in *sock_in, *sock_in_e;
+	struct sockaddr_in6 *sock_in6, *sock_in6_e;
+	struct iscsi_np *np;
+	int ip_match = 0;
+	u16 port;
+
+	list_for_each_entry(np, &g_np_list, np_list) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+			spin_unlock_bh(&np->np_thread_lock);
+			continue;
+		}
+
+		if (sockaddr->ss_family == AF_INET6) {
+			sock_in6 = (struct sockaddr_in6 *)sockaddr;
+			sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+			if (!memcmp(&sock_in6->sin6_addr.in6_u,
+				    &sock_in6_e->sin6_addr.in6_u,
+				    sizeof(struct in6_addr)))
+				ip_match = 1;
+
+			port = ntohs(sock_in6->sin6_port);
+		} else {
+			sock_in = (struct sockaddr_in *)sockaddr;
+			sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+			if (sock_in->sin_addr.s_addr ==
+			    sock_in_e->sin_addr.s_addr)
+				ip_match = 1;
+
+			port = ntohs(sock_in->sin_port);
+		}
+
+		if ((ip_match == 1) && (np->np_port == port) &&
+		    (np->np_network_transport == network_transport)) {
+			/*
+			 * Increment the np_exports reference count now to
+			 * prevent iscsit_del_np() below from being called
+			 * while iscsi_tpg_add_network_portal() is called.
+			 */
+			np->np_exports++;
+			spin_unlock_bh(&np->np_thread_lock);
+			return np;
+		}
+		spin_unlock_bh(&np->np_thread_lock);
+	}
+
+	return NULL;
+}
+
+struct iscsi_np *iscsit_add_np(
+	struct __kernel_sockaddr_storage *sockaddr,
+	char *ip_str,
+	int network_transport)
+{
+	struct sockaddr_in *sock_in;
+	struct sockaddr_in6 *sock_in6;
+	struct iscsi_np *np;
+	int ret;
+
+	mutex_lock(&np_lock);
+
+	/*
+	 * Locate the existing struct iscsi_np if already active..
+	 */
+	np = iscsit_get_np(sockaddr, network_transport);
+	if (np) {
+		mutex_unlock(&np_lock);
+		return np;
+	}
+
+	np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+	if (!np) {
+		pr_err("Unable to allocate memory for struct iscsi_np\n");
+		mutex_unlock(&np_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	np->np_flags |= NPF_IP_NETWORK;
+	if (sockaddr->ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+		snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+		np->np_port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)sockaddr;
+		sprintf(np->np_ip, "%s", ip_str);
+		np->np_port = ntohs(sock_in->sin_port);
+	}
+
+	np->np_network_transport = network_transport;
+	spin_lock_init(&np->np_thread_lock);
+	init_completion(&np->np_restart_comp);
+	INIT_LIST_HEAD(&np->np_list);
+
+	ret = iscsi_target_setup_login_socket(np, sockaddr);
+	if (ret != 0) {
+		kfree(np);
+		mutex_unlock(&np_lock);
+		return ERR_PTR(ret);
+	}
+
+	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
+	if (IS_ERR(np->np_thread)) {
+		pr_err("Unable to create kthread: iscsi_np\n");
+		ret = PTR_ERR(np->np_thread);
+		kfree(np);
+		mutex_unlock(&np_lock);
+		return ERR_PTR(ret);
+	}
+	/*
+	 * Increment the np_exports reference count now to prevent
+	 * iscsit_del_np() below from being run while a new call to
+	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
+	 * active.  We don't need to hold np->np_thread_lock at this
+	 * point because iscsi_np has not been added to g_np_list yet.
+	 */
+	np->np_exports = 1;
+	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+
+	list_add_tail(&np->np_list, &g_np_list);
+	mutex_unlock(&np_lock);
+
+	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+		"TCP" : "SCTP");
+
+	return np;
+}
+
+int iscsit_reset_np_thread(
+	struct iscsi_np *np,
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	if (tpg && tpg_np) {
+		/*
+		 * The reset operation need only be performed when the
+		 * passed struct iscsi_portal_group has a login in progress
+		 * to one of the network portals.
+		 */
+		if (tpg_np->tpg_np->np_login_tpg != tpg) {
+			spin_unlock_bh(&np->np_thread_lock);
+			return 0;
+		}
+	}
+	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return 0;
+	}
+	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+
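+	/*
+	 * Kick the np login thread out of its blocking sock_accept() with
+	 * SIGINT and wait for it to acknowledge the reset via np_restart_comp.
+	 */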
+	if (np->np_thread) {
+		spin_unlock_bh(&np->np_thread_lock);
+		send_sig(SIGINT, np->np_thread, 1);
+		wait_for_completion(&np->np_restart_comp);
+		spin_lock_bh(&np->np_thread_lock);
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	return 0;
+}
+
+int iscsit_del_np_comm(struct iscsi_np *np)
+{
+	if (np->np_socket)
+		sock_release(np->np_socket);
+	return 0;
+}
+
+int iscsit_del_np(struct iscsi_np *np)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_exports--;
+	if (np->np_exports) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return 0;
+	}
+	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	if (np->np_thread) {
+		/*
+		 * We need to send the signal to wakeup Linux/Net
+		 * which may be sleeping in sock_accept()..
+		 */
+		send_sig(SIGINT, np->np_thread, 1);
+		kthread_stop(np->np_thread);
+	}
+	iscsit_del_np_comm(np);
+
+	mutex_lock(&np_lock);
+	list_del(&np->np_list);
+	mutex_unlock(&np_lock);
+
+	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+		"TCP" : "SCTP");
+
+	kfree(np);
+	return 0;
+}
+
+static int __init iscsi_target_init_module(void)
+{
+	int ret = 0;
+
+	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
+	if (!iscsit_global) {
+		pr_err("Unable to allocate memory for iscsit_global\n");
+		return -1;
+	}
+	mutex_init(&auth_id_lock);
+	spin_lock_init(&sess_idr_lock);
+	idr_init(&tiqn_idr);
+	idr_init(&sess_idr);
+
+	ret = iscsi_target_register_configfs();
+	if (ret < 0)
+		goto out;
+
+	ret = iscsi_thread_set_init();
+	if (ret < 0)
+		goto configfs_out;
+
+	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+			TARGET_THREAD_SET_COUNT) {
+		pr_err("iscsi_allocate_thread_sets() returned"
+			" unexpected value!\n");
+		goto ts_out1;
+	}
+
+	lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
+			sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
+			0, NULL);
+	if (!lio_cmd_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_cmd_cache\n");
+		goto ts_out2;
+	}
+
+	lio_qr_cache = kmem_cache_create("lio_qr_cache",
+			sizeof(struct iscsi_queue_req),
+			__alignof__(struct iscsi_queue_req), 0, NULL);
+	if (!lio_qr_cache) {
+		pr_err("nable to kmem_cache_create() for"
+				" lio_qr_cache\n");
+		goto cmd_out;
+	}
+
+	lio_dr_cache = kmem_cache_create("lio_dr_cache",
+			sizeof(struct iscsi_datain_req),
+			__alignof__(struct iscsi_datain_req), 0, NULL);
+	if (!lio_dr_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_dr_cache\n");
+		goto qr_out;
+	}
+
+	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
+			sizeof(struct iscsi_ooo_cmdsn),
+			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
+	if (!lio_ooo_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_ooo_cache\n");
+		goto dr_out;
+	}
+
+	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
+			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
+			0, NULL);
+	if (!lio_r2t_cache) {
+		pr_err("Unable to kmem_cache_create() for"
+				" lio_r2t_cache\n");
+		goto ooo_out;
+	}
+
+	if (iscsit_load_discovery_tpg() < 0)
+		goto r2t_out;
+
+	return ret;
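+	/* Error unwinding: tear down in the reverse order of initialization. */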
+r2t_out:
+	kmem_cache_destroy(lio_r2t_cache);
+ooo_out:
+	kmem_cache_destroy(lio_ooo_cache);
+dr_out:
+	kmem_cache_destroy(lio_dr_cache);
+qr_out:
+	kmem_cache_destroy(lio_qr_cache);
+cmd_out:
+	kmem_cache_destroy(lio_cmd_cache);
+ts_out2:
+	iscsi_deallocate_thread_sets();
+ts_out1:
+	iscsi_thread_set_free();
+configfs_out:
+	iscsi_target_deregister_configfs();
+out:
+	kfree(iscsit_global);
+	return -ENOMEM;
+}
+
+static void __exit iscsi_target_cleanup_module(void)
+{
+	iscsi_deallocate_thread_sets();
+	iscsi_thread_set_free();
+	iscsit_release_discovery_tpg();
+	kmem_cache_destroy(lio_cmd_cache);
+	kmem_cache_destroy(lio_qr_cache);
+	kmem_cache_destroy(lio_dr_cache);
+	kmem_cache_destroy(lio_ooo_cache);
+	kmem_cache_destroy(lio_r2t_cache);
+
+	iscsi_target_deregister_configfs();
+
+	kfree(iscsit_global);
+}
+
+int iscsit_add_reject(
+	u8 reason,
+	int fail_conn,
+	unsigned char *buf,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+	struct iscsi_reject *hdr;
+	int ret;
+
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	if (!cmd)
+		return -1;
+
+	cmd->iscsi_opcode = ISCSI_OP_REJECT;
+	if (fail_conn)
+		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+	hdr	= (struct iscsi_reject *) cmd->pdu;
+	hdr->reason = reason;
+
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!cmd->buf_ptr) {
+		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+		iscsit_release_cmd(cmd);
+		return -1;
+	}
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	cmd->i_state = ISTATE_SEND_REJECT;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	ret = wait_for_completion_interruptible(&cmd->reject_comp);
+	if (ret != 0)
+		return -1;
+
+	return (!fail_conn) ? 0 : -1;
+}
+
+int iscsit_add_reject_from_cmd(
+	u8 reason,
+	int fail_conn,
+	int add_to_conn,
+	unsigned char *buf,
+	struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn;
+	struct iscsi_reject *hdr;
+	int ret;
+
+	if (!cmd->conn) {
+		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+				cmd->init_task_tag);
+		return -1;
+	}
+	conn = cmd->conn;
+
+	cmd->iscsi_opcode = ISCSI_OP_REJECT;
+	if (fail_conn)
+		cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+	hdr	= (struct iscsi_reject *) cmd->pdu;
+	hdr->reason = reason;
+
+	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!cmd->buf_ptr) {
+		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+		iscsit_release_cmd(cmd);
+		return -1;
+	}
+
+	if (add_to_conn) {
+		spin_lock_bh(&conn->cmd_lock);
+		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+		spin_unlock_bh(&conn->cmd_lock);
+	}
+
+	cmd->i_state = ISTATE_SEND_REJECT;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	ret = wait_for_completion_interruptible(&cmd->reject_comp);
+	if (ret != 0)
+		return -1;
+
+	return (!fail_conn) ? 0 : -1;
+}
+
+/*
+ * Map some portion of the allocated scatterlist to an iovec, suitable for
+ * kernel sockets to copy data in/out. This handles both pages and slab-allocated
+ * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
+ * either case (see iscsit_alloc_buffs)
+ */
+static int iscsit_map_iovec(
+	struct iscsi_cmd *cmd,
+	struct kvec *iov,
+	u32 data_offset,
+	u32 data_length)
+{
+	u32 i = 0;
+	struct scatterlist *sg;
+	unsigned int page_off;
+
+	/*
+	 * We have a private mapping of the allocated pages in t_mem_sg.
+	 * At this point, we also know each contains a page.
+	 */
+	sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+	page_off = (data_offset % PAGE_SIZE);
+
+	cmd->first_data_sg = sg;
+	cmd->first_data_sg_off = page_off;
+
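+	/*
+	 * kmap() each scatterlist page into the iovec; the matching kunmap()
+	 * happens in iscsit_unmap_iovec() using kmapped_nents.
+	 */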
+	while (data_length) {
+		u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
+		iov[i].iov_len = cur_len;
+
+		data_length -= cur_len;
+		page_off = 0;
+		sg = sg_next(sg);
+		i++;
+	}
+
+	cmd->kmapped_nents = i;
+
+	return i;
+}
+
+static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+{
+	u32 i;
+	struct scatterlist *sg;
+
+	sg = cmd->first_data_sg;
+
+	for (i = 0; i < cmd->kmapped_nents; i++)
+		kunmap(sg_page(&sg[i]));
+}
+
+static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+{
+	struct iscsi_cmd *cmd;
+
+	conn->exp_statsn = exp_statsn;
+
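+	/*
+	 * Retire commands whose status the initiator has acknowledged,
+	 * i.e. those already sent with a StatSN lower than ExpStatSN.
+	 */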
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		spin_lock(&cmd->istate_lock);
+		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
+		    (cmd->stat_sn < exp_statsn)) {
+			cmd->i_state = ISTATE_REMOVE;
+			spin_unlock(&cmd->istate_lock);
+			iscsit_add_cmd_to_immediate_queue(cmd, conn,
+						cmd->i_state);
+			continue;
+		}
+		spin_unlock(&cmd->istate_lock);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+}
+
+static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+{
+	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
+				cmd->se_cmd.t_data_nents;
+
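+	/* Leave headroom beyond the data scatterlist for padding and digest. */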
+	iov_count += ISCSI_IOV_DATA_BUFFER;
+
+	cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
+	if (!cmd->iov_data) {
+		pr_err("Unable to allocate cmd->iov_data\n");
+		return -ENOMEM;
+	}
+
+	cmd->orig_iov_data_count = iov_count;
+	return 0;
+}
+
+static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+{
+	struct scatterlist *sgl;
+	u32 length = cmd->se_cmd.data_length;
+	int nents = DIV_ROUND_UP(length, PAGE_SIZE);
+	int i = 0, j = 0, ret;
+	/*
+	 * If no SCSI payload is present, allocate the default iovecs used for
+	 * iSCSI PDU Header
+	 */
+	if (!length)
+		return iscsit_allocate_iovecs(cmd);
+
+	sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
+	if (!sgl)
+		return -ENOMEM;
+
+	sg_init_table(sgl, nents);
+
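+	/* Allocate one zeroed page per PAGE_SIZE chunk of the expected payload. */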
+	while (length) {
+		int buf_size = min_t(int, length, PAGE_SIZE);
+		struct page *page;
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto page_alloc_failed;
+
+		sg_set_page(&sgl[i], page, buf_size, 0);
+
+		length -= buf_size;
+		i++;
+	}
+
+	cmd->t_mem_sg = sgl;
+	cmd->t_mem_sg_nents = nents;
+
+	/* BIDI ops not supported */
+
+	/* Tell the core about our preallocated memory */
+	transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
+	/*
+	 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
+	 * so that cmd->se_cmd.t_tasks_se_num has been set.
+	 */
+	ret = iscsit_allocate_iovecs(cmd);
+	if (ret < 0)
+		return -ENOMEM;
+
+	return 0;
+
+page_alloc_failed:
+	while (j < i)
+		__free_page(sg_page(&sgl[j++]));
+
+	kfree(sgl);
+	return -ENOMEM;
+}
+
+static int iscsit_handle_scsi_cmd(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	int	data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
+	int	dump_immediate_data = 0, send_check_condition = 0, payload_length;
+	struct iscsi_cmd	*cmd = NULL;
+	struct iscsi_scsi_req *hdr;
+
+	spin_lock_bh(&conn->sess->session_stats_lock);
+	conn->sess->cmd_pdus++;
+	if (conn->sess->se_sess->se_node_acl) {
+		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+		conn->sess->se_sess->se_node_acl->num_cmds++;
+		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+	}
+	spin_unlock_bh(&conn->sess->session_stats_lock);
+
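+	/* Convert the wire-format (big-endian) header fields to host byte order. */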
+	hdr			= (struct iscsi_scsi_req *) buf;
+	payload_length		= ntoh24(hdr->dlength);
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->data_length	= be32_to_cpu(hdr->data_length);
+	hdr->cmdsn		= be32_to_cpu(hdr->cmdsn);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+
+	/* FIXME: Add checks for AdditionalHeaderSegment */
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
+	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
+				" not set. Bad iSCSI Initiator.\n");
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+				buf, conn);
+	}
+
+	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+		/*
+		 * VMware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+		 * that adds support for RESERVE/RELEASE.  There is a bug in
+		 * this new functionality that sets the R/W bits when neither
+		 * CDB carries any READ or WRITE data payloads.
+		 */
+		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+			goto done;
+		}
+
+		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+			" set when Expected Data Transfer Length is 0 for"
+			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+				buf, conn);
+	}
+done:
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
+			" MUST be set if Expected Data Transfer Length is not 0."
+			" Bad iSCSI Initiator\n");
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+				buf, conn);
+	}
+
+	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
+	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
+		pr_err("Bidirectional operations not supported!\n");
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+				buf, conn);
+	}
+
+	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
+				" Scsi Command PDU.\n");
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+				buf, conn);
+	}
+
+	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
+		pr_err("ImmediateData=No but DataSegmentLength=%u,"
+			" protocol error.\n", payload_length);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+				buf, conn);
+	}
+
+	if ((hdr->data_length == payload_length) &&
+	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
+		pr_err("Expected Data Transfer Length and Length of"
+			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
+			" bit is not set protocol error\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+				buf, conn);
+	}
+
+	if (payload_length > hdr->data_length) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" EDTL: %u, protocol error.\n", payload_length,
+				hdr->data_length);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+				buf, conn);
+	}
+
+	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" MaxRecvDataSegmentLength: %u, protocol error.\n",
+			payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+				buf, conn);
+	}
+
+	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" FirstBurstLength: %u, protocol error.\n",
+			payload_length, conn->sess->sess_ops->FirstBurstLength);
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+					buf, conn);
+	}
+
+	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
+			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
+			  DMA_NONE;
+
+	cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
+				(hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+	if (!cmd)
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+					buf, conn);
+
+	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
+	cmd->i_state		= ISTATE_NEW_CMD;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	cmd->immediate_data	= (payload_length) ? 1 : 0;
+	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
+				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
+	if (cmd->unsolicited_data)
+		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+
+	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+		spin_lock_bh(&conn->sess->ttt_lock);
+		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+			cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+		spin_unlock_bh(&conn->sess->ttt_lock);
+	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+		cmd->targ_xfer_tag = 0xFFFFFFFF;
+	cmd->cmd_sn		= hdr->cmdsn;
+	cmd->exp_stat_sn	= hdr->exp_statsn;
+	cmd->first_burst_len	= payload_length;
+
+	if (cmd->data_direction == DMA_FROM_DEVICE) {
+		struct iscsi_datain_req *dr;
+
+		dr = iscsit_allocate_datain_req();
+		if (!dr)
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					1, 1, buf, cmd);
+
+		iscsit_attach_datain_req(cmd, dr);
+	}
+
+	/*
+	 * The CDB is going to an se_device_t.
+	 */
+	ret = transport_lookup_cmd_lun(&cmd->se_cmd,
+				       scsilun_to_int(&hdr->lun));
+	if (ret < 0) {
+		if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
+			pr_debug("Responding to non-acl'ed,"
+				" non-existent or non-exported iSCSI LUN:"
+				" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+		}
+		send_check_condition = 1;
+		goto attach_cmd;
+	}
+	/*
+	 * The Initiator Node has access to the LUN (the addressing method
+	 * is handled inside of transport_lookup_cmd_lun()).  Now it's time to
+	 * allocate 1->N transport tasks (depending on sector count and the
+	 * maximum request size the physical HBA(s) can handle).
+	 */
+	transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+	if (transport_ret == -ENOMEM) {
+		return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+	} else if (transport_ret < 0) {
+		/*
+		 * Unsupported SAM Opcode.  CHECK_CONDITION will be sent
+		 * in iscsit_execute_cmd() during the CmdSN OOO Execution
+		 * Mechanism.
+		 */
+		send_check_condition = 1;
+	} else {
+		cmd->data_length = cmd->se_cmd.data_length;
+
+		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 1, buf, cmd);
+	}
+
+attach_cmd:
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+	/*
+	 * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state..
+	 */
+	core_alua_check_nonop_delay(&cmd->se_cmd);
+	/*
+	 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
+	 * also call iscsit_allocate_iovecs()
+	 */
+	ret = iscsit_alloc_buffs(cmd);
+	if (ret < 0)
+		return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 0, buf, cmd);
+	/*
+	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+	 * the Immediate Bit is not set, and no Immediate
+	 * Data is attached.
+	 *
+	 * A PDU/CmdSN carrying Immediate Data can only
+	 * be processed after the DataCRC has passed.
+	 * If the DataCRC fails, the CmdSN MUST NOT
+	 * be acknowledged. (See below)
+	 */
+	if (!cmd->immediate_data) {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			return 0;
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_PROTOCOL_ERROR,
+				1, 0, buf, cmd);
+	}
+
+	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+	/*
+	 * If no Immediate Data is attached, it's OK to return now.
+	 */
+	if (!cmd->immediate_data) {
+		if (send_check_condition)
+			return 0;
+
+		if (cmd->unsolicited_data) {
+			iscsit_set_dataout_sequence_values(cmd);
+
+			spin_lock_bh(&cmd->dataout_timeout_lock);
+			iscsit_start_dataout_timer(cmd, cmd->conn);
+			spin_unlock_bh(&cmd->dataout_timeout_lock);
+		}
+
+		return 0;
+	}
+
+	/*
+	 * Early CHECK_CONDITIONs never make it to the transport processing
+	 * thread.  They are processed in CmdSN order by
+	 * iscsit_check_received_cmdsn() below.
+	 */
+	if (send_check_condition) {
+		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+		dump_immediate_data = 1;
+		goto after_immediate_data;
+	}
+	/*
+	 * Call directly into transport_generic_new_cmd() to perform
+	 * the backend memory allocation.
+	 */
+	ret = transport_generic_new_cmd(&cmd->se_cmd);
+	if (ret < 0) {
+		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+		dump_immediate_data = 1;
+		goto after_immediate_data;
+	}
+
+	immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+after_immediate_data:
+	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+		/*
+		 * A PDU/CmdSN carrying Immediate Data passed
+		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
+		 * Immediate Bit is not set.
+		 */
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		/*
+		 * Special case for Unsupported SAM WRITE Opcodes
+		 * and ImmediateData=Yes.
+		 */
+		if (dump_immediate_data) {
+			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+				return -1;
+		} else if (cmd->unsolicited_data) {
+			iscsit_set_dataout_sequence_values(cmd);
+
+			spin_lock_bh(&cmd->dataout_timeout_lock);
+			iscsit_start_dataout_timer(cmd, cmd->conn);
+			spin_unlock_bh(&cmd->dataout_timeout_lock);
+		}
+
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_PROTOCOL_ERROR,
+				1, 0, buf, cmd);
+
+	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+		/*
+		 * Immediate Data failed DataCRC and ERL>=1,
+		 * silently drop this PDU and let the initiator
+		 * plug the CmdSN gap.
+		 *
+		 * FIXME: Send Unsolicited NOPIN with reserved
+		 * TTT here to help the initiator figure out
+		 * the missing CmdSN, although they should be
+		 * intelligent enough to determine the missing
+		 * CmdSN and issue a retry to plug the sequence.
+		 */
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+		return -1;
+
+	return 0;
+}
+
+static u32 iscsit_do_crypto_hash_sg(
+	struct hash_desc *hash,
+	struct iscsi_cmd *cmd,
+	u32 data_offset,
+	u32 data_length,
+	u32 padding,
+	u8 *pad_bytes)
+{
+	u32 data_crc;
+	u32 i;
+	struct scatterlist *sg;
+	unsigned int page_off;
+
+	crypto_hash_init(hash);
+
+	sg = cmd->first_data_sg;
+	page_off = cmd->first_data_sg_off;
+
+	i = 0;
+	while (data_length) {
+		u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+
+		crypto_hash_update(hash, &sg[i], cur_len);
+
+		data_length -= cur_len;
+		page_off = 0;
+		i++;
+	}
+
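+	/* Pad bytes are included in the CRC32C DataDigest calculation as well. */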
+	if (padding) {
+		struct scatterlist pad_sg;
+
+		sg_init_one(&pad_sg, pad_bytes, padding);
+		crypto_hash_update(hash, &pad_sg, padding);
+	}
+	crypto_hash_final(hash, (u8 *) &data_crc);
+
+	return data_crc;
+}
+
+static void iscsit_do_crypto_hash_buf(
+	struct hash_desc *hash,
+	unsigned char *buf,
+	u32 payload_length,
+	u32 padding,
+	u8 *pad_bytes,
+	u8 *data_crc)
+{
+	struct scatterlist sg;
+
+	crypto_hash_init(hash);
+
+	sg_init_one(&sg, buf, payload_length);
+	crypto_hash_update(hash, &sg, payload_length);
+
+	if (padding) {
+		sg_init_one(&sg, pad_bytes, padding);
+		crypto_hash_update(hash, &sg, padding);
+	}
+	crypto_hash_final(hash, data_crc);
+}
+
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+	int iov_ret, ooo_cmdsn = 0, ret;
+	u8 data_crc_failed = 0;
+	u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
+	u32 rx_size = 0, payload_length;
+	struct iscsi_cmd *cmd = NULL;
+	struct se_cmd *se_cmd;
+	struct iscsi_data *hdr;
+	struct kvec *iov;
+	unsigned long flags;
+
+	hdr			= (struct iscsi_data *) buf;
+	payload_length		= ntoh24(hdr->dlength);
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->ttt		= be32_to_cpu(hdr->ttt);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+	hdr->datasn		= be32_to_cpu(hdr->datasn);
+	hdr->offset		= be32_to_cpu(hdr->offset);
+
+	if (!payload_length) {
+		pr_err("DataOUT payload is ZERO, protocol error.\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	/* iSCSI write */
+	spin_lock_bh(&conn->sess->session_stats_lock);
+	conn->sess->rx_data_octets += payload_length;
+	if (conn->sess->se_sess->se_node_acl) {
+		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+		conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
+		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+	}
+	spin_unlock_bh(&conn->sess->session_stats_lock);
+
+	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("DataSegmentLength: %u is greater than"
+			" MaxRecvDataSegmentLength: %u\n", payload_length,
+			conn->conn_ops->MaxRecvDataSegmentLength);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
+			payload_length);
+	if (!cmd)
+		return 0;
+
+	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
+		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+		hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+		payload_length, conn->cid);
+
+	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+		pr_err("Command ITT: 0x%08x received DataOUT after"
+			" last DataOUT received, dumping payload\n",
+			cmd->init_task_tag);
+		return iscsit_dump_data_payload(conn, payload_length, 1);
+	}
+
+	if (cmd->data_direction != DMA_TO_DEVICE) {
+		pr_err("Command ITT: 0x%08x received DataOUT for a"
+			" NON-WRITE command.\n", cmd->init_task_tag);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 0, buf, cmd);
+	}
+	se_cmd = &cmd->se_cmd;
+	iscsit_mod_dataout_timer(cmd);
+
+	if ((hdr->offset + payload_length) > cmd->data_length) {
+		pr_err("DataOut Offset: %u, Length %u greater than"
+			" iSCSI Command EDTL %u, protocol error.\n",
+			hdr->offset, payload_length, cmd->data_length);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 0, buf, cmd);
+	}
+
+	if (cmd->unsolicited_data) {
+		int dump_unsolicited_data = 0;
+
+		if (conn->sess->sess_ops->InitialR2T) {
+			pr_err("Received unexpected unsolicited data"
+				" while InitialR2T=Yes, protocol error.\n");
+			transport_send_check_condition_and_sense(&cmd->se_cmd,
+					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+			return -1;
+		}
+		/*
+		 * Special case for dealing with Unsolicited DataOUT
+		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
+		 * failures;
+		 */
+
+		/* Something's amiss if we're not in WRITE_PENDING state... */
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
+		     (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+			dump_unsolicited_data = 1;
+		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+		if (dump_unsolicited_data) {
+			/*
+			 * Check if a delayed TASK_ABORTED status needs to
+			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
+			 * received with the unsolicited data out.
+			 */
+			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+				iscsit_stop_dataout_timer(cmd);
+
+			transport_check_aborted_status(se_cmd,
+					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+			return iscsit_dump_data_payload(conn, payload_length, 1);
+		}
+	} else {
+		/*
+		 * For the normal solicited data path:
+		 *
+		 * Check for a delayed TASK_ABORTED status and dump any
+		 * incoming data out payload if one exists.  Also, when the
+		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
+		 * data out sequence, we decrement outstanding_r2ts.  Once
+		 * outstanding_r2ts reaches zero, go ahead and send the delayed
+		 * TASK_ABORTED status.
+		 */
+		if (se_cmd->transport_state & CMD_T_ABORTED) {
+			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+				if (--cmd->outstanding_r2ts < 1) {
+					iscsit_stop_dataout_timer(cmd);
+					transport_check_aborted_status(
+							se_cmd, 1);
+				}
+
+			return iscsit_dump_data_payload(conn, payload_length, 1);
+		}
+	}
+	/*
+	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+	 * within-command recovery checks before receiving the payload.
+	 */
+	ret = iscsit_check_pre_dataout(cmd, buf);
+	if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+		return 0;
+	else if (ret == DATAOUT_CANNOT_RECOVER)
+		return -1;
+
+	rx_size += payload_length;
+	iov = &cmd->iov_data[0];
+
+	iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+	if (iov_ret < 0)
+		return -1;
+
+	iov_count += iov_ret;
+
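+	/* (-len) & 3 yields the 0-3 bytes needed to pad to a 4-byte boundary. */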
+	padding = ((-payload_length) & 3);
+	if (padding != 0) {
+		iov[iov_count].iov_base	= cmd->pad_bytes;
+		iov[iov_count++].iov_len = padding;
+		rx_size += padding;
+		pr_debug("Receiving %u padding bytes.\n", padding);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iov[iov_count].iov_base = &checksum;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		rx_size += ISCSI_CRC_LEN;
+	}
+
+	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (rx_got != rx_size)
+		return -1;
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+						    hdr->offset, payload_length, padding,
+						    cmd->pad_bytes);
+
+		if (checksum != data_crc) {
+			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
+				" does not match computed 0x%08x\n",
+				hdr->itt, hdr->offset, payload_length,
+				hdr->datasn, checksum, data_crc);
+			data_crc_failed = 1;
+		} else {
+			pr_debug("Got CRC32C DataDigest 0x%08x for"
+				" %u bytes of Data Out\n", checksum,
+				payload_length);
+		}
+	}
+	/*
+	 * Increment post receive data and CRC values or perform
+	 * within-command recovery.
+	 */
+	ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
+	if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+		return 0;
+	else if (ret == DATAOUT_SEND_R2T) {
+		iscsit_set_dataout_sequence_values(cmd);
+		iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+	} else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+		/*
+		 * Handle extra special case for out of order
+		 * Unsolicited Data Out.
+		 */
+		spin_lock_bh(&cmd->istate_lock);
+		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+
+		iscsit_stop_dataout_timer(cmd);
+		return (!ooo_cmdsn) ? transport_generic_handle_data(
+					&cmd->se_cmd) : 0;
+	} else /* DATAOUT_CANNOT_RECOVER */
+		return -1;
+
+	return 0;
+}
+
+static int iscsit_handle_nop_out(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	unsigned char *ping_data = NULL;
+	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
+	u32 checksum, data_crc, padding = 0, payload_length;
+	struct iscsi_cmd *cmd = NULL;
+	struct kvec *iov = NULL;
+	struct iscsi_nopout *hdr;
+
+	hdr			= (struct iscsi_nopout *) buf;
+	payload_length		= ntoh24(hdr->dlength);
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->ttt		= be32_to_cpu(hdr->ttt);
+	hdr->cmdsn		= be32_to_cpu(hdr->cmdsn);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+
+	if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
+			" not set, protocol error.\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
+			" greater than MaxRecvDataSegmentLength: %u, protocol"
+			" error.\n", payload_length,
+			conn->conn_ops->MaxRecvDataSegmentLength);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+		(hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
+		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
+		payload_length);
+	/*
+	 * This is not a response to an Unsolicited NopIN, which means
+	 * it can either be a NOPOUT ping request (with a valid ITT),
+	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
+	 * Either way, make sure we allocate a struct iscsi_cmd, as both
+	 * can contain ping data.
+	 */
+	if (hdr->ttt == 0xFFFFFFFF) {
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			return iscsit_add_reject(
+					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					1, buf, conn);
+
+		cmd->iscsi_opcode	= ISCSI_OP_NOOP_OUT;
+		cmd->i_state		= ISTATE_SEND_NOPIN;
+		cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
+						1 : 0);
+		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+		cmd->targ_xfer_tag	= 0xFFFFFFFF;
+		cmd->cmd_sn		= hdr->cmdsn;
+		cmd->exp_stat_sn	= hdr->exp_statsn;
+		cmd->data_direction	= DMA_NONE;
+	}
+
+	if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
+		rx_size = payload_length;
+		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+		if (!ping_data) {
+			pr_err("Unable to allocate memory for"
+				" NOPOUT ping data.\n");
+			ret = -1;
+			goto out;
+		}
+
+		iov = &cmd->iov_misc[0];
+		iov[niov].iov_base	= ping_data;
+		iov[niov++].iov_len	= payload_length;
+
+		padding = ((-payload_length) & 3);
+		if (padding != 0) {
+			pr_debug("Receiving %u additional bytes"
+				" for padding.\n", padding);
+			iov[niov].iov_base	= &cmd->pad_bytes;
+			iov[niov++].iov_len	= padding;
+			rx_size += padding;
+		}
+		if (conn->conn_ops->DataDigest) {
+			iov[niov].iov_base	= &checksum;
+			iov[niov++].iov_len	= ISCSI_CRC_LEN;
+			rx_size += ISCSI_CRC_LEN;
+		}
+
+		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
+		if (rx_got != rx_size) {
+			ret = -1;
+			goto out;
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					ping_data, payload_length,
+					padding, cmd->pad_bytes,
+					(u8 *)&data_crc);
+
+			if (checksum != data_crc) {
+				pr_err("Ping data CRC32C DataDigest"
+				" 0x%08x does not match computed 0x%08x\n",
+					checksum, data_crc);
+				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+					pr_err("Unable to recover from"
+					" NOPOUT Ping DataCRC failure while in"
+						" ERL=0.\n");
+					ret = -1;
+					goto out;
+				} else {
+					/*
+					 * Silently drop this PDU and let the
+					 * initiator plug the CmdSN gap.
+					 */
+					pr_debug("Dropping NOPOUT"
+					" Command CmdSN: 0x%08x due to"
+					" DataCRC error.\n", hdr->cmdsn);
+					ret = 0;
+					goto out;
+				}
+			} else {
+				pr_debug("Got CRC32C DataDigest"
+				" 0x%08x for %u bytes of ping data.\n",
+					checksum, payload_length);
+			}
+		}
+
+		ping_data[payload_length] = '\0';
+		/*
+		 * Attach ping data to struct iscsi_cmd->buf_ptr.
+		 */
+		cmd->buf_ptr = ping_data;
+		cmd->buf_ptr_size = payload_length;
+
+		pr_debug("Got %u bytes of NOPOUT ping"
+			" data.\n", payload_length);
+		pr_debug("Ping Data: \"%s\"\n", ping_data);
+	}
+
+	if (hdr->itt != 0xFFFFFFFF) {
+		if (!cmd) {
+			pr_err("Checking CmdSN for NOPOUT,"
+				" but cmd is NULL!\n");
+			return -1;
+		}
+		/*
+		 * Initiator is expecting a NopIN ping reply,
+		 */
+		spin_lock_bh(&conn->cmd_lock);
+		list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+			iscsit_add_cmd_to_response_queue(cmd, conn,
+					cmd->i_state);
+			return 0;
+		}
+
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			ret = 0;
+			goto ping_out;
+		}
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_PROTOCOL_ERROR,
+					1, 0, buf, cmd);
+
+		return 0;
+	}
+
+	if (hdr->ttt != 0xFFFFFFFF) {
+		/*
+		 * This was a response to an unsolicited NOPIN ping.
+		 */
+		cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
+		if (!cmd)
+			return -1;
+
+		iscsit_stop_nopin_response_timer(conn);
+
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+		iscsit_start_nopin_timer(conn);
+	} else {
+		/*
+		 * Initiator is not expecting a NOPIN in response.
+		 * Just ignore for now.
+		 *
+		 * iSCSI v19-91 10.18
+		 * "A NOP-OUT may also be used to confirm a changed
+		 *  ExpStatSN if another PDU will not be available
+		 *  for a long time."
+		 */
+		ret = 0;
+		goto out;
+	}
+
+	return 0;
+out:
+	if (cmd)
+		iscsit_release_cmd(cmd);
+ping_out:
+	kfree(ping_data);
+	return ret;
+}
+
+static int iscsit_handle_task_mgt_cmd(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *cmd;
+	struct se_tmr_req *se_tmr;
+	struct iscsi_tmr_req *tmr_req;
+	struct iscsi_tm *hdr;
+	int out_of_order_cmdsn = 0;
+	int ret;
+	u8 function;
+
+	hdr			= (struct iscsi_tm *) buf;
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->rtt		= be32_to_cpu(hdr->rtt);
+	hdr->cmdsn		= be32_to_cpu(hdr->cmdsn);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+	hdr->refcmdsn		= be32_to_cpu(hdr->refcmdsn);
+	hdr->exp_datasn		= be32_to_cpu(hdr->exp_datasn);
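+	/*
+	 * The TMR function code is carried in the lower bits of the flags
+	 * byte; mask off the Final bit to isolate it.
+	 */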
+	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+	function = hdr->flags;
+
+	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
+		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
+		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
+		hdr->rtt, hdr->refcmdsn, conn->cid);
+
+	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+	     (hdr->rtt != ISCSI_RESERVED_TAG))) {
+		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
+		hdr->rtt = ISCSI_RESERVED_TAG;
+	}
+
+	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
+			!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		pr_err("Task Management Request TASK_REASSIGN not"
+			" issued as immediate command, bad iSCSI Initiator"
+				"implementation\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
+		hdr->refcmdsn = ISCSI_RESERVED_TAG;
+
+	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+	if (!cmd)
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					1, buf, conn);
+
+	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
+	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	cmd->init_task_tag	= hdr->itt;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	cmd->cmd_sn		= hdr->cmdsn;
+	cmd->exp_stat_sn	= hdr->exp_statsn;
+	se_tmr			= cmd->se_cmd.se_tmr_req;
+	tmr_req			= cmd->tmr_req;
+	/*
+	 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
+	 */
+	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+		ret = transport_lookup_tmr_lun(&cmd->se_cmd,
+					       scsilun_to_int(&hdr->lun));
+		if (ret < 0) {
+			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
+			goto attach;
+		}
+	}
+
+	switch (function) {
+	case ISCSI_TM_FUNC_ABORT_TASK:
+		se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
+		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
+			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			goto attach;
+		}
+		break;
+	case ISCSI_TM_FUNC_ABORT_TASK_SET:
+	case ISCSI_TM_FUNC_CLEAR_ACA:
+	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+		break;
+	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+		if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
+			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+			goto attach;
+		}
+		break;
+	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+		if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
+			cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+			goto attach;
+		}
+		break;
+	case ISCSI_TM_FUNC_TASK_REASSIGN:
+		se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
+		/*
+		 * Perform sanity checks on the ExpDataSN only if the
+		 * TASK_REASSIGN was successful.
+		 */
+		if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+			break;
+
+		if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
+					buf, cmd);
+		break;
+	default:
+		pr_err("Unknown TMR function: 0x%02x, protocol"
+			" error.\n", function);
+		cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
+		goto attach;
+	}
+
+	if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+	    (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+		se_tmr->call_transport = 1;
+attach:
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+			out_of_order_cmdsn = 1;
+		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+			return 0;
+		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_PROTOCOL_ERROR,
+					1, 0, buf, cmd);
+	}
+	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+	if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
+		return 0;
+	/*
+	 * Found the referenced task, send to transport for processing.
+	 */
+	if (se_tmr->call_transport)
+		return transport_generic_handle_tmr(&cmd->se_cmd);
+
+	/*
+	 * Could not find the referenced LUN, task, or Task Management
+	 * command not authorized or supported.  Change state and
+	 * let the tx_thread send the response.
+	 *
+	 * For connection recovery, this is also the default action for
+	 * TMR TASK_REASSIGN.
+	 */
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+/* #warning FIXME: Support Text Command parameters besides SendTargets */
+static int iscsit_handle_text_cmd(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	char *text_ptr, *text_in;
+	int cmdsn_ret, niov = 0, rx_got, rx_size;
+	u32 checksum = 0, data_crc = 0, payload_length;
+	u32 padding = 0, pad_bytes = 0, text_length = 0;
+	struct iscsi_cmd *cmd;
+	struct kvec iov[3];
+	struct iscsi_text *hdr;
+
+	hdr			= (struct iscsi_text *) buf;
+	payload_length		= ntoh24(hdr->dlength);
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->ttt		= be32_to_cpu(hdr->ttt);
+	hdr->cmdsn		= be32_to_cpu(hdr->cmdsn);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+
+	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("Unable to accept text parameter length: %u"
+			" greater than MaxRecvDataSegmentLength %u.\n",
+		       payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
+		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
+		hdr->exp_statsn, payload_length);
+
+	rx_size = text_length = payload_length;
+	if (text_length) {
+		text_in = kzalloc(text_length, GFP_KERNEL);
+		if (!text_in) {
+			pr_err("Unable to allocate memory for"
+				" incoming text parameters\n");
+			return -1;
+		}
+
+		memset(iov, 0, 3 * sizeof(struct kvec));
+		iov[niov].iov_base	= text_in;
+		iov[niov++].iov_len	= text_length;
+
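+		/*
+		 * Pad the incoming text payload to the next 4-byte boundary;
+		 * (-len & 3) yields the 0-3 pad bytes to receive.
+		 */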
+		padding = ((-payload_length) & 3);
+		if (padding != 0) {
+			iov[niov].iov_base = &pad_bytes;
+			iov[niov++].iov_len  = padding;
+			rx_size += padding;
+			pr_debug("Receiving %u additional bytes"
+					" for padding.\n", padding);
+		}
+		if (conn->conn_ops->DataDigest) {
+			iov[niov].iov_base	= &checksum;
+			iov[niov++].iov_len	= ISCSI_CRC_LEN;
+			rx_size += ISCSI_CRC_LEN;
+		}
+
+		rx_got = rx_data(conn, &iov[0], niov, rx_size);
+		if (rx_got != rx_size) {
+			kfree(text_in);
+			return -1;
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					text_in, text_length,
+					padding, (u8 *)&pad_bytes,
+					(u8 *)&data_crc);
+
+			if (checksum != data_crc) {
+				pr_err("Text data CRC32C DataDigest"
+					" 0x%08x does not match computed"
+					" 0x%08x\n", checksum, data_crc);
+				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+					pr_err("Unable to recover from"
+					" Text Data digest failure while in"
+						" ERL=0.\n");
+					kfree(text_in);
+					return -1;
+				} else {
+					/*
+					 * Silently drop this PDU and let the
+					 * initiator plug the CmdSN gap.
+					 */
+					pr_debug("Dropping Text"
+					" Command CmdSN: 0x%08x due to"
+					" DataCRC error.\n", hdr->cmdsn);
+					kfree(text_in);
+					return 0;
+				}
+			} else {
+				pr_debug("Got CRC32C DataDigest"
+					" 0x%08x for %u bytes of text data.\n",
+						checksum, text_length);
+			}
+		}
+		text_in[text_length - 1] = '\0';
+		pr_debug("Successfully read %d bytes of text"
+				" data.\n", text_length);
+
+		if (strncmp("SendTargets", text_in, 11) != 0) {
+			pr_err("Received Text Data that is not"
+				" SendTargets, cannot continue.\n");
+			kfree(text_in);
+			return -1;
+		}
+		text_ptr = strchr(text_in, '=');
+		if (!text_ptr) {
+			pr_err("No \"=\" separator found in Text Data,"
+				" cannot continue.\n");
+			kfree(text_in);
+			return -1;
+		}
+		if (strncmp("=All", text_ptr, 4) != 0) {
+			pr_err("Unable to locate All value for"
+				" SendTargets key, cannot continue.\n");
+			kfree(text_in);
+			return -1;
+		}
+/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
+		kfree(text_in);
+	}
+
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	if (!cmd)
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+					1, buf, conn);
+
+	cmd->iscsi_opcode	= ISCSI_OP_TEXT;
+	cmd->i_state		= ISTATE_SEND_TEXTRSP;
+	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	conn->sess->init_task_tag = cmd->init_task_tag	= hdr->itt;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	cmd->cmd_sn		= hdr->cmdsn;
+	cmd->exp_stat_sn	= hdr->exp_statsn;
+	cmd->data_direction	= DMA_NONE;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_PROTOCOL_ERROR,
+					1, 0, buf, cmd);
+
+		return 0;
+	}
+
+	return iscsit_execute_cmd(cmd, 0);
+}
+
+int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_conn *conn_p;
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received logout request CLOSESESSION on CID: %hu"
+		" for SID: %u.\n", conn->cid, conn->sess->sid);
+
+	atomic_set(&sess->session_logout, 1);
+	atomic_set(&conn->conn_logout_remove, 1);
+	conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
+
+	iscsit_inc_conn_usage_count(conn);
+	iscsit_inc_session_usage_count(sess);
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
+		if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
+			continue;
+
+		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+		conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_conn *l_conn;
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received logout request CLOSECONNECTION for CID:"
+		" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+	/*
+	 * A Logout Request with a CLOSECONNECTION reason code for a CID
+	 * can arrive on a connection with a differing CID.
+	 */
+	if (conn->cid == cmd->logout_cid) {
+		spin_lock_bh(&conn->state_lock);
+		pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+		conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+
+		atomic_set(&conn->conn_logout_remove, 1);
+		conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
+		iscsit_inc_conn_usage_count(conn);
+
+		spin_unlock_bh(&conn->state_lock);
+	} else {
+		/*
+		 * Handle all different-CID CLOSECONNECTION requests in
+		 * iscsit_logout_post_handler_diffcid() so as to give enough
+		 * time for any non-immediate command's CmdSN to be
+		 * acknowledged on the connection in question.
+		 *
+		 * Here we simply make sure the CID is still around.
+		 */
+		l_conn = iscsit_get_conn_from_cid(sess,
+				cmd->logout_cid);
+		if (!l_conn) {
+			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+			iscsit_add_cmd_to_response_queue(cmd, conn,
+					cmd->i_state);
+			return 0;
+		}
+
+		iscsit_dec_conn_usage_count(l_conn);
+	}
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
+		" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+	if (sess->sess_ops->ErrorRecoveryLevel != 2) {
+		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+			" while ERL!=2.\n");
+		cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	if (conn->cid == cmd->logout_cid) {
+		pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+			" with CID: %hu on CID: %hu, implementation error.\n",
+				cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+static int iscsit_handle_logout_cmd(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	int cmdsn_ret, logout_remove = 0;
+	u8 reason_code = 0;
+	struct iscsi_cmd *cmd;
+	struct iscsi_logout *hdr;
+	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
+
+	hdr			= (struct iscsi_logout *) buf;
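+	/* The low seven bits of the flags byte carry the logout reason code. */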
+	reason_code		= (hdr->flags & 0x7f);
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->cid		= be16_to_cpu(hdr->cid);
+	hdr->cmdsn		= be32_to_cpu(hdr->cmdsn);
+	hdr->exp_statsn	= be32_to_cpu(hdr->exp_statsn);
+
+	if (tiqn) {
+		spin_lock(&tiqn->logout_stats.lock);
+		if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
+			tiqn->logout_stats.normal_logouts++;
+		else
+			tiqn->logout_stats.abnormal_logouts++;
+		spin_unlock(&tiqn->logout_stats.lock);
+	}
+
+	pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
+		" ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
+		hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
+		hdr->cid, conn->cid);
+
+	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
+		pr_err("Received logout request on connection that"
+			" is not in logged in state, ignoring request.\n");
+		return 0;
+	}
+
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	if (!cmd)
+		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+					buf, conn);
+
+	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
+	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
+	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+	conn->sess->init_task_tag = cmd->init_task_tag  = hdr->itt;
+	cmd->targ_xfer_tag      = 0xFFFFFFFF;
+	cmd->cmd_sn             = hdr->cmdsn;
+	cmd->exp_stat_sn        = hdr->exp_statsn;
+	cmd->logout_cid         = hdr->cid;
+	cmd->logout_reason      = reason_code;
+	cmd->data_direction     = DMA_NONE;
+
+	/*
+	 * We need to sleep in these cases (by returning 1) until the Logout
+	 * Response gets sent in the tx thread.
+	 */
+	if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
+	   ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
+	    (hdr->cid == conn->cid)))
+		logout_remove = 1;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
+		iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+	/*
+	 * Immediate commands are executed, well, immediately.
+	 * Non-Immediate Logout Commands are executed in CmdSN order.
+	 */
+	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+		int ret = iscsit_execute_cmd(cmd, 0);
+
+		if (ret < 0)
+			return ret;
+	} else {
+		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			logout_remove = 0;
+		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_PROTOCOL_ERROR,
+				1, 0, buf, cmd);
+		}
+	}
+
+	return logout_remove;
+}
+
+static int iscsit_handle_snack(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_snack *hdr;
+
+	hdr			= (struct iscsi_snack *) buf;
+	hdr->flags		&= ~ISCSI_FLAG_CMD_FINAL;
+	hdr->itt		= be32_to_cpu(hdr->itt);
+	hdr->ttt		= be32_to_cpu(hdr->ttt);
+	hdr->exp_statsn		= be32_to_cpu(hdr->exp_statsn);
+	hdr->begrun		= be32_to_cpu(hdr->begrun);
+	hdr->runlength		= be32_to_cpu(hdr->runlength);
+
+	pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
+		" 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
+		" CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
+			hdr->begrun, hdr->runlength, conn->cid);
+
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Initiator sent SNACK request while in"
+			" ErrorRecoveryLevel=0.\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+	/*
+	 * SNACK_DATA and SNACK_R2T are both 0, so check which function to
+	 * call from inside iscsi_send_recovery_datain_or_r2t().
+	 */
+	switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
+	case 0:
+		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
+			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
+	case ISCSI_FLAG_SNACK_TYPE_STATUS:
+		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
+			hdr->begrun, hdr->runlength);
+	case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
+		return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
+			hdr->runlength);
+	case ISCSI_FLAG_SNACK_TYPE_RDATA:
+		/* FIXME: Support R-Data SNACK */
+		pr_err("R-Data SNACK Not Supported.\n");
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	default:
+		pr_err("Unknown SNACK type 0x%02x, protocol"
+			" error.\n", hdr->flags & 0x0f);
+		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buf, conn);
+	}
+
+	return 0;
+}
+
+static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->rx_half_close_comp,
+					ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
+static int iscsit_handle_immediate_data(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u32 length)
+{
+	int iov_ret, rx_got = 0, rx_size = 0;
+	u32 checksum, iov_count = 0, padding = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct kvec *iov;
+
+	iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+	if (iov_ret < 0)
+		return IMMEDIATE_DATA_CANNOT_RECOVER;
+
+	rx_size = length;
+	iov_count = iov_ret;
+	iov = &cmd->iov_data[0];
+
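+	/* Immediate data is padded to the next 4-byte boundary before any DataDigest. */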
+	padding = ((-length) & 3);
+	if (padding != 0) {
+		iov[iov_count].iov_base	= cmd->pad_bytes;
+		iov[iov_count++].iov_len = padding;
+		rx_size += padding;
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iov[iov_count].iov_base		= &checksum;
+		iov[iov_count++].iov_len	= ISCSI_CRC_LEN;
+		rx_size += ISCSI_CRC_LEN;
+	}
+
+	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+	iscsit_unmap_iovec(cmd);
+
+	if (rx_got != rx_size) {
+		iscsit_rx_thread_wait_for_tcp(conn);
+		return IMMEDIATE_DATA_CANNOT_RECOVER;
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+						    cmd->write_data_done, length, padding,
+						    cmd->pad_bytes);
+
+		if (checksum != data_crc) {
+			pr_err("ImmediateData CRC32C DataDigest 0x%08x"
+				" does not match computed 0x%08x\n", checksum,
+				data_crc);
+
+			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+				pr_err("Unable to recover from"
+					" Immediate Data digest failure while"
+					" in ERL=0.\n");
+				iscsit_add_reject_from_cmd(
+						ISCSI_REASON_DATA_DIGEST_ERROR,
+						1, 0, buf, cmd);
+				return IMMEDIATE_DATA_CANNOT_RECOVER;
+			} else {
+				iscsit_add_reject_from_cmd(
+						ISCSI_REASON_DATA_DIGEST_ERROR,
+						0, 0, buf, cmd);
+				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+			}
+		} else {
+			pr_debug("Got CRC32C DataDigest 0x%08x for"
+				" %u bytes of Immediate Data\n", checksum,
+				length);
+		}
+	}
+
+	cmd->write_data_done += length;
+
+	if (cmd->write_data_done == cmd->data_length) {
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+	}
+
+	return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+/*
+ *	Called with sess->conn_lock held.
+ */
+/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
+	with active network interface */
+static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+	struct iscsi_conn *conn_p;
+	bool found = false;
+
+	/*
+	 * Only send an Asynchronous Message on connections whose network
+	 * interface is still functional.
+	 */
+	list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+		if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+			iscsit_inc_conn_usage_count(conn_p);
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return;
+
+	cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+	if (!cmd) {
+		iscsit_dec_conn_usage_count(conn_p);
+		return;
+	}
+
+	cmd->logout_cid = conn->cid;
+	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+	cmd->i_state = ISTATE_SEND_ASYNCMSG;
+
+	spin_lock_bh(&conn_p->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+	spin_unlock_bh(&conn_p->cmd_lock);
+
+	iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
+	iscsit_dec_conn_usage_count(conn_p);
+}
+
+static int iscsit_send_conn_drop_async_message(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_async *hdr;
+
+	cmd->tx_size = ISCSI_HDR_LEN;
+	cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+
+	hdr			= (struct iscsi_async *) cmd->pdu;
+	hdr->opcode		= ISCSI_OP_ASYNC_EVENT;
+	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
+	cmd->init_task_tag	= 0xFFFFFFFF;
+	cmd->targ_xfer_tag	= 0xFFFFFFFF;
+	put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->async_event	= ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
+	hdr->param1		= cpu_to_be16(cmd->logout_cid);
+	hdr->param2		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
+	hdr->param3		= cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		cmd->tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" Async Message 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= cmd->tx_size;
+	cmd->iov_misc_count		= 1;
+
+	pr_debug("Sending Connection Dropped Async Message StatSN:"
+		" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
+			cmd->logout_cid, conn->cid);
+	return 0;
+}
+
+static int iscsit_send_data_in(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int *eodr)
+{
+	int iov_ret = 0, set_statsn = 0;
+	u32 iov_count = 0, tx_size = 0;
+	struct iscsi_datain datain;
+	struct iscsi_datain_req *dr;
+	struct iscsi_data_rsp *hdr;
+	struct kvec *iov;
+
+	memset(&datain, 0, sizeof(struct iscsi_datain));
+	dr = iscsit_get_datain_values(cmd, &datain);
+	if (!dr) {
+		pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
+				cmd->init_task_tag);
+		return -1;
+	}
+
+	/*
+	 * Be paranoid and double check the logic for now.
+	 */
+	if ((datain.offset + datain.length) > cmd->data_length) {
+		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
+			" datain.length: %u exceeds cmd->data_length: %u\n",
+			cmd->init_task_tag, datain.offset, datain.length,
+				cmd->data_length);
+		return -1;
+	}
+
+	spin_lock_bh(&conn->sess->session_stats_lock);
+	conn->sess->tx_data_octets += datain.length;
+	if (conn->sess->se_sess->se_node_acl) {
+		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+		conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
+		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+	}
+	spin_unlock_bh(&conn->sess->session_stats_lock);
+	/*
+	 * Special case for successful execution w/ both DATAIN
+	 * and Sense Data.
+	 */
+	if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
+	    (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
+		datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
+	else {
+		if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
+		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
+			iscsit_increment_maxcmdsn(cmd, conn->sess);
+			cmd->stat_sn = conn->stat_sn++;
+			set_statsn = 1;
+		} else if (dr->dr_complete ==
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+			set_statsn = 1;
+	}
+
+	hdr	= (struct iscsi_data_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
+	hdr->flags		= datain.flags;
+	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		}
+	}
+	hton24(hdr->dlength, datain.length);
+	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+				(struct scsi_lun *)&hdr->lun);
+	else
+		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	hdr->ttt		= (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
+				   cpu_to_be32(cmd->targ_xfer_tag) :
+				   0xFFFFFFFF;
+	hdr->statsn		= (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
+						0xFFFFFFFF;
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->datasn		= cpu_to_be32(datain.data_sn);
+	hdr->offset		= cpu_to_be32(datain.offset);
+
+	iov = &cmd->iov_data[0];
+	iov[iov_count].iov_base	= cmd->pdu;
+	iov[iov_count++].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attaching CRC32 HeaderDigest"
+			" for DataIN PDU 0x%08x\n", *header_digest);
+	}
+
+	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+	if (iov_ret < 0)
+		return -1;
+
+	iov_count += iov_ret;
+	tx_size += datain.length;
+
+	cmd->padding = ((-datain.length) & 3);
+	if (cmd->padding) {
+		iov[iov_count].iov_base		= cmd->pad_bytes;
+		iov[iov_count++].iov_len	= cmd->padding;
+		tx_size += cmd->padding;
+
+		pr_debug("Attaching %u padding bytes\n",
+				cmd->padding);
+	}
+	if (conn->conn_ops->DataDigest) {
+		cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+			 datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
+
+		iov[iov_count].iov_base	= &cmd->data_crc;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+
+		pr_debug("Attached CRC32C DataDigest %d bytes, crc"
+			" 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
+	}
+
+	cmd->iov_data_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+		ntohl(hdr->offset), datain.length, conn->cid);
+
+	if (dr->dr_complete) {
+		*eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+				2 : 1;
+		iscsit_free_datain_req(cmd, dr);
+	}
+
+	return 0;
+}
+
+static int iscsit_send_logout_response(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int niov = 0, tx_size;
+	struct iscsi_conn *logout_conn = NULL;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_session *sess = conn->sess;
+	struct kvec *iov;
+	struct iscsi_logout_rsp *hdr;
+	/*
+	 * The actual shutting down of Sessions and/or Connections
+	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
+	 * is done in iscsit_logout_post_handler().
+	 */
+	switch (cmd->logout_reason) {
+	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+		pr_debug("iSCSI session logout successful, setting"
+			" logout response to ISCSI_LOGOUT_SUCCESS.\n");
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+		if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
+			break;
+		/*
+		 * For CLOSECONNECTION logout requests carrying
+		 * a matching logout CID -> local CID, the reference
+		 * for the local CID will have been incremented in
+		 * iscsi_logout_closeconnection().
+		 *
+		 * For CLOSECONNECTION logout requests carrying
+		 * a different CID than the connection it arrived
+		 * on, the connection responding to cmd->logout_cid
+		 * is stopped in iscsit_logout_post_handler_diffcid().
+		 */
+
+		pr_debug("iSCSI CID: %hu logout on CID: %hu"
+			" successful.\n", cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	case ISCSI_LOGOUT_REASON_RECOVERY:
+		if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
+		    (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
+			break;
+		/*
+		 * If the connection is still active from our point of view,
+		 * force connection recovery to occur.
+		 */
+		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
+				cmd->logout_cid);
+		if ((logout_conn)) {
+			iscsit_connection_reinstatement_rcfr(logout_conn);
+			iscsit_dec_conn_usage_count(logout_conn);
+		}
+
+		cr = iscsit_get_inactive_connection_recovery_entry(
+				conn->sess, cmd->logout_cid);
+		if (!cr) {
+			pr_err("Unable to locate CID: %hu for"
+			" REMOVECONNFORRECOVERY Logout Request.\n",
+				cmd->logout_cid);
+			cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+			break;
+		}
+
+		iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
+
+		pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
+			" for recovery for CID: %hu on CID: %hu successful.\n",
+				cmd->logout_cid, conn->cid);
+		cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+		break;
+	default:
+		pr_err("Unknown cmd->logout_reason: 0x%02x\n",
+				cmd->logout_reason);
+		return -1;
+	}
+
+	tx_size = ISCSI_HDR_LEN;
+	hdr			= (struct iscsi_logout_rsp *)cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hdr->response		= cmd->logout_response;
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	iov = &cmd->iov_misc[0];
+	iov[niov].iov_base	= cmd->pdu;
+	iov[niov++].iov_len	= ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" Logout Response 0x%08x\n", *header_digest);
+	}
+	cmd->iov_misc_count = niov;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
+		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response,
+		cmd->logout_cid, conn->cid);
+
+	return 0;
+}
+
+/*
+ *	Unsolicited NOPIN, either requesting a response or not.
+ */
+static int iscsit_send_unsolicited_nopin(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int want_response)
+{
+	int tx_size = ISCSI_HDR_LEN;
+	struct iscsi_nopin *hdr;
+
+	hdr			= (struct iscsi_nopin *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_NOOP_IN;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
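+	/* Unsolicited NOPINs do not advance StatSN; the current value is echoed as-is. */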
+	cmd->stat_sn		= conn->stat_sn;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest to"
+			" NopIN 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= tx_size;
+	cmd->iov_misc_count	= 1;
+	cmd->tx_size		= tx_size;
+
+	pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
+		" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+
+	return 0;
+}
+
+static int iscsit_send_nopin_response(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int niov = 0, tx_size;
+	u32 padding = 0;
+	struct kvec *iov;
+	struct iscsi_nopin *hdr;
+
+	tx_size = ISCSI_HDR_LEN;
+	hdr			= (struct iscsi_nopin *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_NOOP_IN;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, cmd->buf_ptr_size);
+	put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	iov = &cmd->iov_misc[0];
+	iov[niov].iov_base	= cmd->pdu;
+	iov[niov++].iov_len	= ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32C HeaderDigest"
+			" to NopIn 0x%08x\n", *header_digest);
+	}
+
+	/*
+	 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
+	 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+	 */
+	if (cmd->buf_ptr_size) {
+		iov[niov].iov_base	= cmd->buf_ptr;
+		iov[niov++].iov_len	= cmd->buf_ptr_size;
+		tx_size += cmd->buf_ptr_size;
+
+		pr_debug("Echoing back %u bytes of ping"
+			" data.\n", cmd->buf_ptr_size);
+
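+		/* Echoed ping data must also be padded to a 4-byte boundary. */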
+		padding = ((-cmd->buf_ptr_size) & 3);
+		if (padding != 0) {
+			iov[niov].iov_base = &cmd->pad_bytes;
+			iov[niov++].iov_len = padding;
+			tx_size += padding;
+			pr_debug("Attaching %u additional"
+				" padding bytes.\n", padding);
+		}
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->buf_ptr, cmd->buf_ptr_size,
+				padding, (u8 *)&cmd->pad_bytes,
+				(u8 *)&cmd->data_crc);
+
+			iov[niov].iov_base = &cmd->data_crc;
+			iov[niov++].iov_len = ISCSI_CRC_LEN;
+			tx_size += ISCSI_CRC_LEN;
+			pr_debug("Attached DataDigest for %u"
+				" bytes of ping data, CRC 0x%08x\n",
+				cmd->buf_ptr_size, cmd->data_crc);
+		}
+	}
+
+	cmd->iov_misc_count = niov;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
+		" 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+
+	return 0;
+}
+
+int iscsit_send_r2t(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int tx_size = 0;
+	struct iscsi_r2t *r2t;
+	struct iscsi_r2t_rsp *hdr;
+
+	r2t = iscsit_get_r2t_from_list(cmd);
+	if (!r2t)
+		return -1;
+
+	hdr			= (struct iscsi_r2t_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_R2T;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+			(struct scsi_lun *)&hdr->lun);
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
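+	/*
+	 * Allocate the next Target Transfer Tag under ttt_lock; 0xFFFFFFFF
+	 * is the reserved TTT value, so skip it when the counter wraps.
+	 */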
+	spin_lock_bh(&conn->sess->ttt_lock);
+	r2t->targ_xfer_tag	= conn->sess->targ_xfer_tag++;
+	if (r2t->targ_xfer_tag == 0xFFFFFFFF)
+		r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+	spin_unlock_bh(&conn->sess->ttt_lock);
+	hdr->ttt		= cpu_to_be32(r2t->targ_xfer_tag);
+	hdr->statsn		= cpu_to_be32(conn->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->r2tsn		= cpu_to_be32(r2t->r2t_sn);
+	hdr->data_offset	= cpu_to_be32(r2t->offset);
+	hdr->data_length	= cpu_to_be32(r2t->xfer_len);
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for R2T"
+			" PDU 0x%08x\n", *header_digest);
+	}
+
+	pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
+		" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
+		(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
+		r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
+			r2t->offset, r2t->xfer_len, conn->cid);
+
+	cmd->iov_misc_count = 1;
+	cmd->tx_size = tx_size;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	r2t->sent_r2t = 1;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+/*
+ *	type 0: Normal Operation.
+ *	type 1: Called from Storage Transport.
+ *	type 2: Called from iscsi_task_reassign_complete_write() for
+ *	        connection recovery.
+ */
+int iscsit_build_r2ts_for_cmd(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int type)
+{
+	int first_r2t = 1;
+	u32 offset = 0, xfer_len = 0;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return 0;
+	}
+
+	if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
+		if (cmd->r2t_offset < cmd->write_data_done)
+			cmd->r2t_offset = cmd->write_data_done;
+
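+	/* Queue R2Ts until MaxOutstandingR2T is reached or the final R2T has been built. */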
+	while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			offset = cmd->r2t_offset;
+
+			if (first_r2t && (type == 2)) {
+				xfer_len = ((offset +
+					     (conn->sess->sess_ops->MaxBurstLength -
+					     cmd->next_burst_len) >
+					     cmd->data_length) ?
+					    (cmd->data_length - offset) :
+					    (conn->sess->sess_ops->MaxBurstLength -
+					     cmd->next_burst_len));
+			} else {
+				xfer_len = ((offset +
+					     conn->sess->sess_ops->MaxBurstLength) >
+					     cmd->data_length) ?
+					     (cmd->data_length - offset) :
+					     conn->sess->sess_ops->MaxBurstLength;
+			}
+			cmd->r2t_offset += xfer_len;
+
+			if (cmd->r2t_offset == cmd->data_length)
+				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+		} else {
+			struct iscsi_seq *seq;
+
+			seq = iscsit_get_seq_holder_for_r2t(cmd);
+			if (!seq) {
+				spin_unlock_bh(&cmd->r2t_lock);
+				return -1;
+			}
+
+			offset = seq->offset;
+			xfer_len = seq->xfer_len;
+
+			if (cmd->seq_send_order == cmd->seq_count)
+				cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+		}
+		cmd->outstanding_r2ts++;
+		first_r2t = 0;
+
+		if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+
+		if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
+			break;
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+static int iscsit_send_status(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	u8 iov_count = 0, recovery;
+	u32 padding = 0, tx_size = 0;
+	struct iscsi_scsi_rsp *hdr;
+	struct kvec *iov;
+
+	recovery = (cmd->i_state != ISTATE_SEND_STATUS);
+	if (!recovery)
+		cmd->stat_sn = conn->stat_sn++;
+
+	spin_lock_bh(&conn->sess->session_stats_lock);
+	conn->sess->rsp_pdus++;
+	spin_unlock_bh(&conn->sess->session_stats_lock);
+
+	hdr			= (struct iscsi_scsi_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+		hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+	} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+		hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+	}
+	hdr->response		= cmd->iscsi_response;
+	hdr->cmd_status		= cmd->se_cmd.scsi_status;
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	iov = &cmd->iov_misc[0];
+	iov[iov_count].iov_base	= cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	/*
+	 * Attach SENSE DATA payload to iSCSI Response PDU
+	 */
+	if (cmd->se_cmd.sense_buffer &&
+	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
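+		/* Sense data is padded to a 4-byte boundary before being attached. */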
+		padding		= -(cmd->se_cmd.scsi_sense_length) & 3;
+		hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
+		iov[iov_count].iov_base	= cmd->se_cmd.sense_buffer;
+		iov[iov_count++].iov_len =
+				(cmd->se_cmd.scsi_sense_length + padding);
+		tx_size += cmd->se_cmd.scsi_sense_length;
+
+		if (padding) {
+			memset(cmd->se_cmd.sense_buffer +
+				cmd->se_cmd.scsi_sense_length, 0, padding);
+			tx_size += padding;
+			pr_debug("Adding %u bytes of padding to"
+				" SENSE.\n", padding);
+		}
+
+		if (conn->conn_ops->DataDigest) {
+			iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->se_cmd.sense_buffer,
+				(cmd->se_cmd.scsi_sense_length + padding),
+				0, NULL, (u8 *)&cmd->data_crc);
+
+			iov[iov_count].iov_base    = &cmd->data_crc;
+			iov[iov_count++].iov_len     = ISCSI_CRC_LEN;
+			tx_size += ISCSI_CRC_LEN;
+
+			pr_debug("Attaching CRC32 DataDigest for"
+				" SENSE, %u bytes CRC 0x%08x\n",
+				(cmd->se_cmd.scsi_sense_length + padding),
+				cmd->data_crc);
+		}
+
+		pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
+				" Response PDU\n",
+				cmd->se_cmd.scsi_sense_length);
+	}
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for Response"
+				" PDU 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+		(!recovery) ? "" : "Recovery ", cmd->init_task_tag,
+		cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
+
+	return 0;
+}
+
+static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
+{
+	switch (se_tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		return ISCSI_TMF_RSP_COMPLETE;
+	case TMR_TASK_DOES_NOT_EXIST:
+		return ISCSI_TMF_RSP_NO_TASK;
+	case TMR_LUN_DOES_NOT_EXIST:
+		return ISCSI_TMF_RSP_NO_LUN;
+	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+		return ISCSI_TMF_RSP_NOT_SUPPORTED;
+	case TMR_FUNCTION_AUTHORIZATION_FAILED:
+		return ISCSI_TMF_RSP_AUTH_FAILED;
+	case TMR_FUNCTION_REJECTED:
+	default:
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+}
+
+static int iscsit_send_task_mgt_rsp(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+	struct iscsi_tm_rsp *hdr;
+	u32 tx_size = 0;
+
+	hdr			= (struct iscsi_tm_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
+	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
+	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	cmd->iov_misc[0].iov_base	= cmd->pdu;
+	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
+	tx_size += ISCSI_HDR_LEN;
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for Task"
+			" Mgmt Response PDU 0x%08x\n", *header_digest);
+	}
+
+	cmd->iov_misc_count = 1;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built Task Management Response ITT: 0x%08x,"
+		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+
+	return 0;
+}
+
+static bool iscsit_check_inaddr_any(struct iscsi_np *np)
+{
+	bool ret = false;
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		const struct sockaddr_in6 sin6 = {
+			.sin6_addr = IN6ADDR_ANY_INIT };
+		struct sockaddr_in6 *sock_in6 =
+			 (struct sockaddr_in6 *)&np->np_sockaddr;
+
+		if (!memcmp(sock_in6->sin6_addr.s6_addr,
+				sin6.sin6_addr.s6_addr, 16))
+			ret = true;
+	} else {
+		struct sockaddr_in * sock_in =
+			(struct sockaddr_in *)&np->np_sockaddr;
+
+		if (sock_in->sin_addr.s_addr == INADDR_ANY)
+			ret = true;
+	}
+
+	return ret;
+}
+
+static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+{
+	char *payload = NULL;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_tpg_np *tpg_np;
+	int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+	unsigned char buf[256];
+
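+	/* Cap the SendTargets payload at the initiator's MaxRecvDataSegmentLength, bounded to 32K. */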
+	buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
+			32768 : conn->conn_ops->MaxRecvDataSegmentLength;
+
+	memset(buf, 0, 256);
+
+	payload = kzalloc(buffer_len, GFP_KERNEL);
+	if (!payload) {
+		pr_err("Unable to allocate memory for sendtargets"
+				" response.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock(&tiqn_lock);
+	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+		len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
+		len += 1;
+
+		if ((len + payload_len) > buffer_len) {
+			end_of_buf = 1;
+			goto eob;
+		}
+		memcpy(payload + payload_len, buf, len);
+		payload_len += len;
+
+		spin_lock(&tiqn->tiqn_tpg_lock);
+		list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+			spin_lock(&tpg->tpg_state_lock);
+			if ((tpg->tpg_state == TPG_STATE_FREE) ||
+			    (tpg->tpg_state == TPG_STATE_INACTIVE)) {
+				spin_unlock(&tpg->tpg_state_lock);
+				continue;
+			}
+			spin_unlock(&tpg->tpg_state_lock);
+
+			spin_lock(&tpg->tpg_np_lock);
+			list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+						tpg_np_list) {
+				struct iscsi_np *np = tpg_np->tpg_np;
+				bool inaddr_any = iscsit_check_inaddr_any(np);
+
+				len = sprintf(buf, "TargetAddress="
+					"%s%s%s:%hu,%hu",
+					(np->np_sockaddr.ss_family == AF_INET6) ?
+					"[" : "", (inaddr_any == false) ?
+						np->np_ip : conn->local_ip,
+					(np->np_sockaddr.ss_family == AF_INET6) ?
+					"]" : "", (inaddr_any == false) ?
+						np->np_port : conn->local_port,
+					tpg->tpgt);
+				len += 1;
+
+				if ((len + payload_len) > buffer_len) {
+					spin_unlock(&tpg->tpg_np_lock);
+					spin_unlock(&tiqn->tiqn_tpg_lock);
+					end_of_buf = 1;
+					goto eob;
+				}
+				memcpy(payload + payload_len, buf, len);
+				payload_len += len;
+			}
+			spin_unlock(&tpg->tpg_np_lock);
+		}
+		spin_unlock(&tiqn->tiqn_tpg_lock);
+eob:
+		if (end_of_buf)
+			break;
+	}
+	spin_unlock(&tiqn_lock);
+
+	cmd->buf_ptr = payload;
+
+	return payload_len;
+}
+
+/*
+ *	FIXME: Add support for F_BIT and C_BIT when the length is longer than
+ *	MaxRecvDataSegmentLength.
+ */
+static int iscsit_send_text_rsp(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_text_rsp *hdr;
+	struct kvec *iov;
+	u32 padding = 0, tx_size = 0;
+	int text_length, iov_count = 0;
+
+	text_length = iscsit_build_sendtargets_response(cmd);
+	if (text_length < 0)
+		return text_length;
+
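+	/* Pad the SendTargets text payload to a 4-byte boundary. */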
+	padding = ((-text_length) & 3);
+	if (padding != 0) {
+		memset(cmd->buf_ptr + text_length, 0, padding);
+		pr_debug("Attaching %u additional bytes for"
+			" padding.\n", padding);
+	}
+
+	hdr			= (struct iscsi_text_rsp *) cmd->pdu;
+	memset(hdr, 0, ISCSI_HDR_LEN);
+	hdr->opcode		= ISCSI_OP_TEXT_RSP;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, text_length);
+	hdr->itt		= cpu_to_be32(cmd->init_task_tag);
+	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	iov = &cmd->iov_misc[0];
+
+	iov[iov_count].iov_base = cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	iov[iov_count].iov_base	= cmd->buf_ptr;
+	iov[iov_count++].iov_len = text_length + padding;
+
+	tx_size += (ISCSI_HDR_LEN + text_length + padding);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for"
+			" Text Response PDU 0x%08x\n", *header_digest);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				cmd->buf_ptr, (text_length + padding),
+				0, NULL, (u8 *)&cmd->data_crc);
+
+		iov[iov_count].iov_base	= &cmd->data_crc;
+		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+		tx_size	+= ISCSI_CRC_LEN;
+
+		pr_debug("Attaching DataDigest for %u bytes of text"
+			" data, CRC 0x%08x\n", (text_length + padding),
+			cmd->data_crc);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
+		" Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
+			text_length, conn->cid);
+	return 0;
+}
+
+static int iscsit_send_reject(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	u32 iov_count = 0, tx_size = 0;
+	struct iscsi_reject *hdr;
+	struct kvec *iov;
+
+	hdr			= (struct iscsi_reject *) cmd->pdu;
+	hdr->opcode		= ISCSI_OP_REJECT;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, ISCSI_HDR_LEN);
+	hdr->ffffffff		= 0xffffffff;
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn	= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn	= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	iov = &cmd->iov_misc[0];
+
+	iov[iov_count].iov_base = cmd->pdu;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+	iov[iov_count].iov_base = cmd->buf_ptr;
+	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+
+	tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
+
+	if (conn->conn_ops->HeaderDigest) {
+		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)header_digest);
+
+		iov[0].iov_len += ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 HeaderDigest for"
+			" REJECT PDU 0x%08x\n", *header_digest);
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+				(unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
+				0, NULL, (u8 *)&cmd->data_crc);
+
+		iov[iov_count].iov_base = &cmd->data_crc;
+		iov[iov_count++].iov_len  = ISCSI_CRC_LEN;
+		tx_size += ISCSI_CRC_LEN;
+		pr_debug("Attaching CRC32 DataDigest for REJECT"
+				" PDU 0x%08x\n", cmd->data_crc);
+	}
+
+	cmd->iov_misc_count = iov_count;
+	cmd->tx_size = tx_size;
+
+	pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
+		" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
+
+	return 0;
+}
+
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+		wait_for_completion_interruptible_timeout(
+					&conn->tx_half_close_comp,
+					ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+	}
+}
+
+#ifdef CONFIG_SMP
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+	struct iscsi_thread_set *ts = conn->thread_set;
+	int ord, cpu;
+	/*
+	 * thread_id is assigned from iscsit_global->ts_bitmap from
+	 * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+	 *
+	 * Here we use thread_id to determine which CPU this iSCSI
+	 * connection's iscsi_thread_set will be scheduled to
+	 * execute upon.
+	 */
+	ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+#if 0
+	pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
+			" thread_id: %d\n", ord, ts->thread_id);
+#endif
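+	/* Walk the online CPUs and pin this connection's thread set to the ord'th one. */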
+	for_each_online_cpu(cpu) {
+		if (ord-- == 0) {
+			cpumask_set_cpu(cpu, conn->conn_cpumask);
+			return;
+		}
+	}
+	/*
+	 * This should never be reached.
+	 */
+	dump_stack();
+	cpumask_setall(conn->conn_cpumask);
+}
+
+static inline void iscsit_thread_check_cpumask(
+	struct iscsi_conn *conn,
+	struct task_struct *p,
+	int mode)
+{
+	char buf[128];
+	/*
+	 * mode == 1 signals iscsi_target_tx_thread() usage.
+	 * mode == 0 signals iscsi_target_rx_thread() usage.
+	 */
+	if (mode == 1) {
+		if (!conn->conn_tx_reset_cpumask)
+			return;
+		conn->conn_tx_reset_cpumask = 0;
+	} else {
+		if (!conn->conn_rx_reset_cpumask)
+			return;
+		conn->conn_rx_reset_cpumask = 0;
+	}
+	/*
+	 * Update the CPU mask for this single kthread so that
+	 * both TX and RX kthreads are scheduled to run on the
+	 * same CPU.
+	 */
+	memset(buf, 0, 128);
+	cpumask_scnprintf(buf, 128, conn->conn_cpumask);
+#if 0
+	pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
+			" %s for %s\n", buf, p->comm);
+#endif
+	set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
+
+#else
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+	return;
+}
+
+#define iscsit_thread_check_cpumask(X, Y, Z) ({})
+#endif /* CONFIG_SMP */
+
+int iscsi_target_tx_thread(void *arg)
+{
+	u8 state;
+	int eodr = 0;
+	int ret = 0;
+	int sent_status = 0;
+	int use_misc = 0;
+	int map_sg = 0;
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_conn *conn;
+	struct iscsi_queue_req *qr = NULL;
+	struct iscsi_thread_set *ts = arg;
+	/*
+	 * Allow ourselves to be interrupted by SIGINT so that a
+	 * connection recovery / failure event can be triggered externally.
+	 */
+	allow_signal(SIGINT);
+
+restart:
+	conn = iscsi_tx_thread_pre_handler(ts);
+	if (!conn)
+		goto out;
+
+	eodr = map_sg = ret = sent_status = use_misc = 0;
+
+	while (!kthread_should_stop()) {
+		/*
+		 * Ensure that both TX and RX per connection kthreads
+		 * are scheduled to run on the same CPU.
+		 */
+		iscsit_thread_check_cpumask(conn, current, 1);
+
+		wait_event_interruptible(conn->queues_wq,
+					 !iscsit_conn_all_queues_empty(conn) ||
+					 ts->status == ISCSI_THREAD_SET_RESET);
+
+		if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+		     signal_pending(current))
+			goto transport_err;
+
+get_immediate:
+		qr = iscsit_get_cmd_from_immediate_queue(conn);
+		if (qr) {
+			atomic_set(&conn->check_immediate_queue, 0);
+			cmd = qr->cmd;
+			state = qr->state;
+			kmem_cache_free(lio_qr_cache, qr);
+
+			spin_lock_bh(&cmd->istate_lock);
+			switch (state) {
+			case ISTATE_SEND_R2T:
+				spin_unlock_bh(&cmd->istate_lock);
+				ret = iscsit_send_r2t(cmd, conn);
+				break;
+			case ISTATE_REMOVE:
+				spin_unlock_bh(&cmd->istate_lock);
+
+				if (cmd->data_direction == DMA_TO_DEVICE)
+					iscsit_stop_dataout_timer(cmd);
+
+				spin_lock_bh(&conn->cmd_lock);
+				list_del(&cmd->i_list);
+				spin_unlock_bh(&conn->cmd_lock);
+
+				iscsit_free_cmd(cmd);
+				goto get_immediate;
+			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+				spin_unlock_bh(&cmd->istate_lock);
+				iscsit_mod_nopin_response_timer(conn);
+				ret = iscsit_send_unsolicited_nopin(cmd,
+						conn, 1);
+				break;
+			case ISTATE_SEND_NOPIN_NO_RESPONSE:
+				spin_unlock_bh(&cmd->istate_lock);
+				ret = iscsit_send_unsolicited_nopin(cmd,
+						conn, 0);
+				break;
+			default:
+				pr_err("Unknown Opcode: 0x%02x ITT:"
+				" 0x%08x, i_state: %d on CID: %hu\n",
+				cmd->iscsi_opcode, cmd->init_task_tag, state,
+				conn->cid);
+				spin_unlock_bh(&cmd->istate_lock);
+				goto transport_err;
+			}
+			if (ret < 0) {
+				conn->tx_immediate_queue = 0;
+				goto transport_err;
+			}
+
+			if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+				conn->tx_immediate_queue = 0;
+				iscsit_tx_thread_wait_for_tcp(conn);
+				goto transport_err;
+			}
+
+			spin_lock_bh(&cmd->istate_lock);
+			switch (state) {
+			case ISTATE_SEND_R2T:
+				spin_unlock_bh(&cmd->istate_lock);
+				spin_lock_bh(&cmd->dataout_timeout_lock);
+				iscsit_start_dataout_timer(cmd, conn);
+				spin_unlock_bh(&cmd->dataout_timeout_lock);
+				break;
+			case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+				cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
+				spin_unlock_bh(&cmd->istate_lock);
+				break;
+			case ISTATE_SEND_NOPIN_NO_RESPONSE:
+				cmd->i_state = ISTATE_SENT_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				break;
+			default:
+				pr_err("Unknown Opcode: 0x%02x ITT:"
+					" 0x%08x, i_state: %d on CID: %hu\n",
+					cmd->iscsi_opcode, cmd->init_task_tag,
+					state, conn->cid);
+				spin_unlock_bh(&cmd->istate_lock);
+				goto transport_err;
+			}
+			goto get_immediate;
+		} else
+			conn->tx_immediate_queue = 0;
+
+get_response:
+		qr = iscsit_get_cmd_from_response_queue(conn);
+		if (qr) {
+			cmd = qr->cmd;
+			state = qr->state;
+			kmem_cache_free(lio_qr_cache, qr);
+
+			spin_lock_bh(&cmd->istate_lock);
+check_rsp_state:
+			switch (state) {
+			case ISTATE_SEND_DATAIN:
+				spin_unlock_bh(&cmd->istate_lock);
+				ret = iscsit_send_data_in(cmd, conn,
+							  &eodr);
+				map_sg = 1;
+				break;
+			case ISTATE_SEND_STATUS:
+			case ISTATE_SEND_STATUS_RECOVERY:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_status(cmd, conn);
+				break;
+			case ISTATE_SEND_LOGOUTRSP:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_logout_response(cmd, conn);
+				break;
+			case ISTATE_SEND_ASYNCMSG:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_conn_drop_async_message(
+						cmd, conn);
+				break;
+			case ISTATE_SEND_NOPIN:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_nopin_response(cmd, conn);
+				break;
+			case ISTATE_SEND_REJECT:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_reject(cmd, conn);
+				break;
+			case ISTATE_SEND_TASKMGTRSP:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_task_mgt_rsp(cmd, conn);
+				if (ret != 0)
+					break;
+				ret = iscsit_tmr_post_handler(cmd, conn);
+				if (ret != 0)
+					iscsit_fall_back_to_erl0(conn->sess);
+				break;
+			case ISTATE_SEND_TEXTRSP:
+				spin_unlock_bh(&cmd->istate_lock);
+				use_misc = 1;
+				ret = iscsit_send_text_rsp(cmd, conn);
+				break;
+			default:
+				pr_err("Unknown Opcode: 0x%02x ITT:"
+					" 0x%08x, i_state: %d on CID: %hu\n",
+					cmd->iscsi_opcode, cmd->init_task_tag,
+					state, conn->cid);
+				spin_unlock_bh(&cmd->istate_lock);
+				goto transport_err;
+			}
+			if (ret < 0) {
+				conn->tx_response_queue = 0;
+				goto transport_err;
+			}
+
+			if (map_sg && !conn->conn_ops->IFMarker) {
+				if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
+					conn->tx_response_queue = 0;
+					iscsit_tx_thread_wait_for_tcp(conn);
+					iscsit_unmap_iovec(cmd);
+					goto transport_err;
+				}
+			} else {
+				if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
+					conn->tx_response_queue = 0;
+					iscsit_tx_thread_wait_for_tcp(conn);
+					iscsit_unmap_iovec(cmd);
+					goto transport_err;
+				}
+			}
+			map_sg = 0;
+			iscsit_unmap_iovec(cmd);
+
+			spin_lock_bh(&cmd->istate_lock);
+			switch (state) {
+			case ISTATE_SEND_DATAIN:
+				if (!eodr)
+					goto check_rsp_state;
+
+				if (eodr == 1) {
+					cmd->i_state = ISTATE_SENT_LAST_DATAIN;
+					sent_status = 1;
+					eodr = use_misc = 0;
+				} else if (eodr == 2) {
+					cmd->i_state = state =
+							ISTATE_SEND_STATUS;
+					sent_status = 0;
+					eodr = use_misc = 0;
+					goto check_rsp_state;
+				}
+				break;
+			case ISTATE_SEND_STATUS:
+				use_misc = 0;
+				sent_status = 1;
+				break;
+			case ISTATE_SEND_ASYNCMSG:
+			case ISTATE_SEND_NOPIN:
+			case ISTATE_SEND_STATUS_RECOVERY:
+			case ISTATE_SEND_TEXTRSP:
+				use_misc = 0;
+				sent_status = 1;
+				break;
+			case ISTATE_SEND_REJECT:
+				use_misc = 0;
+				if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+					cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+					spin_unlock_bh(&cmd->istate_lock);
+					complete(&cmd->reject_comp);
+					goto transport_err;
+				}
+				complete(&cmd->reject_comp);
+				break;
+			case ISTATE_SEND_TASKMGTRSP:
+				use_misc = 0;
+				sent_status = 1;
+				break;
+			case ISTATE_SEND_LOGOUTRSP:
+				spin_unlock_bh(&cmd->istate_lock);
+				if (!iscsit_logout_post_handler(cmd, conn))
+					goto restart;
+				spin_lock_bh(&cmd->istate_lock);
+				use_misc = 0;
+				sent_status = 1;
+				break;
+			default:
+				pr_err("Unknown Opcode: 0x%02x ITT:"
+					" 0x%08x, i_state: %d on CID: %hu\n",
+					cmd->iscsi_opcode, cmd->init_task_tag,
+					cmd->i_state, conn->cid);
+				spin_unlock_bh(&cmd->istate_lock);
+				goto transport_err;
+			}
+
+			if (sent_status) {
+				cmd->i_state = ISTATE_SENT_STATUS;
+				sent_status = 0;
+			}
+			spin_unlock_bh(&cmd->istate_lock);
+
+			if (atomic_read(&conn->check_immediate_queue))
+				goto get_immediate;
+
+			goto get_response;
+		} else
+			conn->tx_response_queue = 0;
+	}
+
+transport_err:
+	iscsit_take_action_for_connection_exit(conn);
+	goto restart;
+out:
+	return 0;
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+	int ret;
+	u8 buffer[ISCSI_HDR_LEN], opcode;
+	u32 checksum = 0, digest = 0;
+	struct iscsi_conn *conn = NULL;
+	struct iscsi_thread_set *ts = arg;
+	struct kvec iov;
+	/*
+	 * Allow ourselves to be interrupted by SIGINT so that a
+	 * connection recovery / failure event can be triggered externally.
+	 */
+	allow_signal(SIGINT);
+
+restart:
+	conn = iscsi_rx_thread_pre_handler(ts);
+	if (!conn)
+		goto out;
+
+	while (!kthread_should_stop()) {
+		/*
+		 * Ensure that both TX and RX per connection kthreads
+		 * are scheduled to run on the same CPU.
+		 */
+		iscsit_thread_check_cpumask(conn, current, 0);
+
+		memset(buffer, 0, ISCSI_HDR_LEN);
+		memset(&iov, 0, sizeof(struct kvec));
+
+		iov.iov_base	= buffer;
+		iov.iov_len	= ISCSI_HDR_LEN;
+
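+		/*
+		 * Read the fixed-length iSCSI Basic Header Segment first;
+		 * the HeaderDigest, if negotiated, is received and verified
+		 * below before the PDU is dispatched by opcode.
+		 */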
+		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+		if (ret != ISCSI_HDR_LEN) {
+			iscsit_rx_thread_wait_for_tcp(conn);
+			goto transport_err;
+		}
+
+		/*
+		 * Set conn->bad_hdr for use with REJECT PDUs.
+		 */
+		memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
+
+		if (conn->conn_ops->HeaderDigest) {
+			iov.iov_base	= &digest;
+			iov.iov_len	= ISCSI_CRC_LEN;
+
+			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+			if (ret != ISCSI_CRC_LEN) {
+				iscsit_rx_thread_wait_for_tcp(conn);
+				goto transport_err;
+			}
+
+			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+					buffer, ISCSI_HDR_LEN,
+					0, NULL, (u8 *)&checksum);
+
+			if (digest != checksum) {
+				pr_err("HeaderDigest CRC32C failed,"
+					" received 0x%08x, computed 0x%08x\n",
+					digest, checksum);
+				/*
+				 * Set the PDU to 0xff so it will intentionally
+				 * hit default in the switch below.
+				 */
+				memset(buffer, 0xff, ISCSI_HDR_LEN);
+				spin_lock_bh(&conn->sess->session_stats_lock);
+				conn->sess->conn_digest_errors++;
+				spin_unlock_bh(&conn->sess->session_stats_lock);
+			} else {
+				pr_debug("Got HeaderDigest CRC32C"
+						" 0x%08x\n", checksum);
+			}
+		}
+
+		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+			goto transport_err;
+
+		opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
+		if (conn->sess->sess_ops->SessionType &&
+		   ((!(opcode & ISCSI_OP_TEXT)) ||
+		    (!(opcode & ISCSI_OP_LOGOUT)))) {
+			pr_err("Received illegal iSCSI Opcode: 0x%02x"
+			" while in Discovery Session, rejecting.\n", opcode);
+			iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+					buffer, conn);
+			goto transport_err;
+		}
+
+		switch (opcode) {
+		case ISCSI_OP_SCSI_CMD:
+			if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_SCSI_DATA_OUT:
+			if (iscsit_handle_data_out(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_NOOP_OUT:
+			if (iscsit_handle_nop_out(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_SCSI_TMFUNC:
+			if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_TEXT:
+			if (iscsit_handle_text_cmd(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_LOGOUT:
+			ret = iscsit_handle_logout_cmd(conn, buffer);
+			if (ret > 0) {
+				wait_for_completion_timeout(&conn->conn_logout_comp,
+						SECONDS_FOR_LOGOUT_COMP * HZ);
+				goto transport_err;
+			} else if (ret < 0)
+				goto transport_err;
+			break;
+		case ISCSI_OP_SNACK:
+			if (iscsit_handle_snack(conn, buffer) < 0)
+				goto transport_err;
+			break;
+		default:
+			pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
+					opcode);
+			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+				pr_err("Cannot recover from unknown"
+				" opcode while ERL=0, closing iSCSI connection"
+				".\n");
+				goto transport_err;
+			}
+			if (!conn->conn_ops->OFMarker) {
+				pr_err("Unable to recover from unknown"
+				" opcode while OFMarker=No, closing iSCSI"
+					" connection.\n");
+				goto transport_err;
+			}
+			if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+				pr_err("Unable to recover from unknown"
+					" opcode, closing iSCSI connection.\n");
+				goto transport_err;
+			}
+			break;
+		}
+	}
+
+transport_err:
+	if (!signal_pending(current))
+		atomic_set(&conn->transport_failed, 1);
+	iscsit_take_action_for_connection_exit(conn);
+	goto restart;
+out:
+	return 0;
+}
+
+static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+	struct iscsi_session *sess = conn->sess;
+	/*
+	 * We expect this function to only ever be called from either RX or TX
+	 * thread context via iscsit_close_connection() once the other context
+	 * has been reset and returned to its sleeping pre-handler state.
+	 */
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+
+		list_del(&cmd->i_list);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_increment_maxcmdsn(cmd, sess);
+
+		iscsit_free_cmd(cmd);
+
+		spin_lock_bh(&conn->cmd_lock);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+}
+
+static void iscsit_stop_timers_for_cmds(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+}
+
+int iscsit_close_connection(
+	struct iscsi_conn *conn)
+{
+	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
+	struct iscsi_session	*sess = conn->sess;
+
+	pr_debug("Closing iSCSI connection CID %hu on SID:"
+		" %u\n", conn->cid, sess->sid);
+	/*
+	 * Always up conn_logout_comp just in case the RX Thread is sleeping
+	 * and the logout response never got sent because the connection
+	 * failed.
+	 */
+	complete(&conn->conn_logout_comp);
+
+	iscsi_release_thread_set(conn);
+
+	iscsit_stop_timers_for_cmds(conn);
+	iscsit_stop_nopin_response_timer(conn);
+	iscsit_stop_nopin_timer(conn);
+	iscsit_free_queue_reqs_for_conn(conn);
+
+	/*
+	 * During Connection recovery drop unacknowledged out of order
+	 * commands for this connection, and prepare the other commands
+	 * for reallegiance.
+	 *
+	 * During normal operation clear the out of order commands (but
+	 * do not free the struct iscsi_ooo_cmdsn's) and release all
+	 * struct iscsi_cmds.
+	 */
+	if (atomic_read(&conn->connection_recovery)) {
+		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
+		iscsit_prepare_cmds_for_realligance(conn);
+	} else {
+		iscsit_clear_ooo_cmdsns_for_conn(conn);
+		iscsit_release_commands_from_conn(conn);
+	}
+
+	/*
+	 * Handle decrementing session or connection usage count if
+	 * a logout response was not able to be sent because the
+	 * connection failed.  Fall back to Session Recovery here.
+	 */
+	if (atomic_read(&conn->conn_logout_remove)) {
+		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
+			iscsit_dec_conn_usage_count(conn);
+			iscsit_dec_session_usage_count(sess);
+		}
+		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
+			iscsit_dec_conn_usage_count(conn);
+
+		atomic_set(&conn->conn_logout_remove, 0);
+		atomic_set(&sess->session_reinstatement, 0);
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
+	}
+
+	spin_lock_bh(&sess->conn_lock);
+	list_del(&conn->conn_list);
+
+	/*
+	 * Attempt to let the Initiator know this connection failed by
+	 * sending a Connection Dropped Async Message on another
+	 * active connection.
+	 */
+	if (atomic_read(&conn->connection_recovery))
+		iscsit_build_conn_drop_async_message(conn);
+
+	spin_unlock_bh(&sess->conn_lock);
+
+	/*
+	 * If connection reinstatement is being performed on this connection,
+	 * complete the connection reinstatement completion that is being
+	 * waited on in iscsit_cause_connection_reinstatement().
+	 */
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
+		spin_unlock_bh(&conn->state_lock);
+		complete(&conn->conn_wait_comp);
+		wait_for_completion(&conn->conn_post_wait_comp);
+		spin_lock_bh(&conn->state_lock);
+	}
+
+	/*
+	 * If connection reinstatement is being performed on this connection
+	 * by receiving a REMOVECONNFORRECOVERY logout request, complete the
+	 * conn_wait_rcfr completion that is being waited on in
+	 * iscsit_connection_reinstatement_rcfr().
+	 */
+	if (atomic_read(&conn->connection_wait_rcfr)) {
+		spin_unlock_bh(&conn->state_lock);
+		complete(&conn->conn_wait_rcfr_comp);
+		wait_for_completion(&conn->conn_post_wait_comp);
+		spin_lock_bh(&conn->state_lock);
+	}
+	atomic_set(&conn->connection_reinstatement, 1);
+	spin_unlock_bh(&conn->state_lock);
+
+	/*
+	 * If any other processes are accessing this connection pointer we
+	 * must wait until they have completed.
+	 */
+	iscsit_check_conn_usage_count(conn);
+
+	if (conn->conn_rx_hash.tfm)
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+	if (conn->conn_tx_hash.tfm)
+		crypto_free_hash(conn->conn_tx_hash.tfm);
+
+	if (conn->conn_cpumask)
+		free_cpumask_var(conn->conn_cpumask);
+
+	kfree(conn->conn_ops);
+	conn->conn_ops = NULL;
+
+	if (conn->sock)
+		sock_release(conn->sock);
+	conn->thread_set = NULL;
+
+	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+	conn->conn_state = TARG_CONN_STATE_FREE;
+	kfree(conn);
+
+	spin_lock_bh(&sess->conn_lock);
+	atomic_dec(&sess->nconn);
+	pr_debug("Decremented iSCSI connection count to %hu from node:"
+		" %s\n", atomic_read(&sess->nconn),
+		sess->sess_ops->InitiatorName);
+	/*
+	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
+	 * Session, they all fail.
+	 */
+	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
+	     !atomic_read(&sess->session_logout))
+		atomic_set(&sess->session_fall_back_to_erl0, 1);
+
+	/*
+	 * If this was not the last connection in the session, and we are
+	 * performing session reinstatement or falling back to ERL=0, call
+	 * iscsit_stop_session() without sleeping to shut down the other
+	 * active connections.
+	 */
+	if (atomic_read(&sess->nconn)) {
+		if (!atomic_read(&sess->session_reinstatement) &&
+		    !atomic_read(&sess->session_fall_back_to_erl0)) {
+			spin_unlock_bh(&sess->conn_lock);
+			return 0;
+		}
+		if (!atomic_read(&sess->session_stop_active)) {
+			atomic_set(&sess->session_stop_active, 1);
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_stop_session(sess, 0, 0);
+			return 0;
+		}
+		spin_unlock_bh(&sess->conn_lock);
+		return 0;
+	}
+
+	/*
+	 * If this was the last connection in the session and one of the
+	 * following is occurring:
+	 *
+	 * Session Reinstatement is not being performed, and we are falling
+	 * back to ERL=0: call iscsit_close_session().
+	 *
+	 * Session Logout was requested.  iscsit_close_session() will be called
+	 * elsewhere.
+	 *
+	 * Session Continuation is not being performed, start the Time2Retain
+	 * handler and check if sleep_on_sess_wait_comp is active.
+	 */
+	if (!atomic_read(&sess->session_reinstatement) &&
+	     atomic_read(&sess->session_fall_back_to_erl0)) {
+		spin_unlock_bh(&sess->conn_lock);
+		target_put_session(sess->se_sess);
+
+		return 0;
+	} else if (atomic_read(&sess->session_logout)) {
+		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+		sess->session_state = TARG_SESS_STATE_FREE;
+		spin_unlock_bh(&sess->conn_lock);
+
+		if (atomic_read(&sess->sleep_on_sess_wait_comp))
+			complete(&sess->session_wait_comp);
+
+		return 0;
+	} else {
+		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+		sess->session_state = TARG_SESS_STATE_FAILED;
+
+		if (!atomic_read(&sess->session_continuation)) {
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_start_time2retain_handler(sess);
+		} else
+			spin_unlock_bh(&sess->conn_lock);
+
+		if (atomic_read(&sess->sleep_on_sess_wait_comp))
+			complete(&sess->session_wait_comp);
+
+		return 0;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	return 0;
+}
+
+int iscsit_close_session(struct iscsi_session *sess)
+{
+	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	if (atomic_read(&sess->nconn)) {
+		pr_err("%d connection(s) still exist for iSCSI session"
+			" to %s\n", atomic_read(&sess->nconn),
+			sess->sess_ops->InitiatorName);
+		BUG();
+	}
+
+	spin_lock_bh(&se_tpg->session_lock);
+	atomic_set(&sess->session_logout, 1);
+	atomic_set(&sess->session_reinstatement, 1);
+	iscsit_stop_time2retain_timer(sess);
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * transport_deregister_session_configfs() will clear the
+	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
+	 * context can set it again with __transport_register_session() in
+	 * iscsi_post_login_handler() after the iscsit_stop_session()
+	 * completes in iscsi_np context.
+	 */
+	transport_deregister_session_configfs(sess->se_sess);
+
+	/*
+	 * If any other processes are accessing this session pointer we must
+	 * wait until they have completed.  If we are in an interrupt (the
+	 * time2retain handler) and hold an active session usage count, we
+	 * restart the timer and exit.
+	 */
+	if (!in_interrupt()) {
+		if (iscsit_check_session_usage_count(sess) == 1)
+			iscsit_stop_session(sess, 1, 1);
+	} else {
+		if (iscsit_check_session_usage_count(sess) == 2) {
+			atomic_set(&sess->session_logout, 0);
+			iscsit_start_time2retain_handler(sess);
+			return 0;
+		}
+	}
+
+	transport_deregister_session(sess->se_sess);
+
+	if (sess->sess_ops->ErrorRecoveryLevel == 2)
+		iscsit_free_connection_recovery_entires(sess);
+
+	iscsit_free_all_ooo_cmdsns(sess);
+
+	spin_lock_bh(&se_tpg->session_lock);
+	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+	sess->session_state = TARG_SESS_STATE_FREE;
+	pr_debug("Released iSCSI session from node: %s\n",
+			sess->sess_ops->InitiatorName);
+	tpg->nsessions--;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_nsessions--;
+
+	pr_debug("Decremented number of active iSCSI Sessions on"
+		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
+
+	spin_lock(&sess_idr_lock);
+	idr_remove(&sess_idr, sess->session_index);
+	spin_unlock(&sess_idr_lock);
+
+	kfree(sess->sess_ops);
+	sess->sess_ops = NULL;
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	kfree(sess);
+	return 0;
+}
+
+static void iscsit_logout_post_handler_closesession(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+	atomic_set(&conn->conn_logout_remove, 0);
+	complete(&conn->conn_logout_comp);
+
+	iscsit_dec_conn_usage_count(conn);
+	iscsit_stop_session(sess, 1, 1);
+	iscsit_dec_session_usage_count(sess);
+	target_put_session(sess->se_sess);
+}
+
+static void iscsit_logout_post_handler_samecid(
+	struct iscsi_conn *conn)
+{
+	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+	atomic_set(&conn->conn_logout_remove, 0);
+	complete(&conn->conn_logout_comp);
+
+	iscsit_cause_connection_reinstatement(conn, 1);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsit_logout_post_handler_diffcid(
+	struct iscsi_conn *conn,
+	u16 cid)
+{
+	struct iscsi_conn *l_conn;
+	struct iscsi_session *sess = conn->sess;
+	bool conn_found = false;
+
+	if (!sess)
+		return;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
+		if (l_conn->cid == cid) {
+			iscsit_inc_conn_usage_count(l_conn);
+			conn_found = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	if (!conn_found)
+		return;
+
+	if (l_conn->sock)
+		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
+
+	spin_lock_bh(&l_conn->state_lock);
+	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+	spin_unlock_bh(&l_conn->state_lock);
+
+	iscsit_cause_connection_reinstatement(l_conn, 1);
+	iscsit_dec_conn_usage_count(l_conn);
+}
+
+/*
+ *	Return of 0 causes the TX thread to restart.
+ */
+static int iscsit_logout_post_handler(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int ret = 0;
+
+	switch (cmd->logout_reason) {
+	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+		switch (cmd->logout_response) {
+		case ISCSI_LOGOUT_SUCCESS:
+		case ISCSI_LOGOUT_CLEANUP_FAILED:
+		default:
+			iscsit_logout_post_handler_closesession(conn);
+			break;
+		}
+		ret = 0;
+		break;
+	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+		if (conn->cid == cmd->logout_cid) {
+			switch (cmd->logout_response) {
+			case ISCSI_LOGOUT_SUCCESS:
+			case ISCSI_LOGOUT_CLEANUP_FAILED:
+			default:
+				iscsit_logout_post_handler_samecid(conn);
+				break;
+			}
+			ret = 0;
+		} else {
+			switch (cmd->logout_response) {
+			case ISCSI_LOGOUT_SUCCESS:
+				iscsit_logout_post_handler_diffcid(conn,
+					cmd->logout_cid);
+				break;
+			case ISCSI_LOGOUT_CID_NOT_FOUND:
+			case ISCSI_LOGOUT_CLEANUP_FAILED:
+			default:
+				break;
+			}
+			ret = 1;
+		}
+		break;
+	case ISCSI_LOGOUT_REASON_RECOVERY:
+		switch (cmd->logout_response) {
+		case ISCSI_LOGOUT_SUCCESS:
+		case ISCSI_LOGOUT_CID_NOT_FOUND:
+		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
+		case ISCSI_LOGOUT_CLEANUP_FAILED:
+		default:
+			break;
+		}
+		ret = 1;
+		break;
+	default:
+		break;
+
+	}
+	return ret;
+}
+
+void iscsit_fail_session(struct iscsi_session *sess)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+	sess->session_state = TARG_SESS_STATE_FAILED;
+}
+
+int iscsit_free_session(struct iscsi_session *sess)
+{
+	u16 conn_count = atomic_read(&sess->nconn);
+	struct iscsi_conn *conn, *conn_tmp = NULL;
+	int is_last;
+
+	spin_lock_bh(&sess->conn_lock);
+	atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
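+	/*
+	 * Take a usage count on the next connection (conn_tmp) so the
+	 * list_for_each_entry_safe() walk stays valid while conn_lock is
+	 * dropped around iscsit_cause_connection_reinstatement() below.
+	 */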
+	list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+			conn_list) {
+		if (conn_count == 0)
+			break;
+
+		if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+			is_last = 1;
+		} else {
+			iscsit_inc_conn_usage_count(conn_tmp);
+			is_last = 0;
+		}
+		iscsit_inc_conn_usage_count(conn);
+
+		spin_unlock_bh(&sess->conn_lock);
+		iscsit_cause_connection_reinstatement(conn, 1);
+		spin_lock_bh(&sess->conn_lock);
+
+		iscsit_dec_conn_usage_count(conn);
+		if (is_last == 0)
+			iscsit_dec_conn_usage_count(conn_tmp);
+
+		conn_count--;
+	}
+
+	if (atomic_read(&sess->nconn)) {
+		spin_unlock_bh(&sess->conn_lock);
+		wait_for_completion(&sess->session_wait_comp);
+	} else
+		spin_unlock_bh(&sess->conn_lock);
+
+	target_put_session(sess->se_sess);
+	return 0;
+}
+
+void iscsit_stop_session(
+	struct iscsi_session *sess,
+	int session_sleep,
+	int connection_sleep)
+{
+	u16 conn_count = atomic_read(&sess->nconn);
+	struct iscsi_conn *conn, *conn_tmp = NULL;
+	int is_last;
+
+	spin_lock_bh(&sess->conn_lock);
+	if (session_sleep)
+		atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+	if (connection_sleep) {
+		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+				conn_list) {
+			if (conn_count == 0)
+				break;
+
+			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+				is_last = 1;
+			} else {
+				iscsit_inc_conn_usage_count(conn_tmp);
+				is_last = 0;
+			}
+			iscsit_inc_conn_usage_count(conn);
+
+			spin_unlock_bh(&sess->conn_lock);
+			iscsit_cause_connection_reinstatement(conn, 1);
+			spin_lock_bh(&sess->conn_lock);
+
+			iscsit_dec_conn_usage_count(conn);
+			if (is_last == 0)
+				iscsit_dec_conn_usage_count(conn_tmp);
+			conn_count--;
+		}
+	} else {
+		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+			iscsit_cause_connection_reinstatement(conn, 0);
+	}
+
+	if (session_sleep && atomic_read(&sess->nconn)) {
+		spin_unlock_bh(&sess->conn_lock);
+		wait_for_completion(&sess->session_wait_comp);
+	} else
+		spin_unlock_bh(&sess->conn_lock);
+}
+
+int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+{
+	struct iscsi_session *sess;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+	int session_count = 0;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	if (tpg->nsessions && !force) {
+		spin_unlock_bh(&se_tpg->session_lock);
+		return -1;
+	}
+
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+		spin_lock(&sess->conn_lock);
+		if (atomic_read(&sess->session_fall_back_to_erl0) ||
+		    atomic_read(&sess->session_logout) ||
+		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+			spin_unlock(&sess->conn_lock);
+			continue;
+		}
+		atomic_set(&sess->session_reinstatement, 1);
+		spin_unlock(&sess->conn_lock);
+		spin_unlock_bh(&se_tpg->session_lock);
+
+		iscsit_free_session(sess);
+		spin_lock_bh(&se_tpg->session_lock);
+
+		session_count++;
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	pr_debug("Released %d iSCSI Session(s) from Target Portal"
+			" Group: %hu\n", session_count, tpg->tpgt);
+	return 0;
+}
+
+MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
+MODULE_VERSION("4.1.x");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iscsi_target_init_module);
+module_exit(iscsi_target_cleanup_module);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 0000000..5db2dde
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
+#ifndef ISCSI_TARGET_H
+#define ISCSI_TARGET_H
+
+extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
+extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
+extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
+extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
+extern void iscsit_del_tiqn(struct iscsi_tiqn *);
+extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
+				char *, int);
+extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+				struct iscsi_portal_group *);
+extern int iscsit_del_np(struct iscsi_np *);
+extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
+extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+extern int iscsit_close_connection(struct iscsi_conn *);
+extern int iscsit_close_session(struct iscsi_session *);
+extern void iscsit_fail_session(struct iscsi_session *);
+extern int iscsit_free_session(struct iscsi_session *);
+extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+extern struct iscsit_global *iscsit_global;
+extern struct target_fabric_configfs *lio_target_fabric_configfs;
+
+extern struct kmem_cache *lio_dr_cache;
+extern struct kmem_cache *lio_ooo_cache;
+extern struct kmem_cache *lio_cmd_cache;
+extern struct kmem_cache *lio_qr_cache;
+extern struct kmem_cache *lio_r2t_cache;
+
+#endif   /*** ISCSI_TARGET_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 0000000..cbb04f3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,484 @@
+/*******************************************************************************
+ * This file houses the main functions for the iSCSI CHAP support
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_auth.h"
+
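+/*
+ * Convert an ascii-hex string of 'len' characters into binary form in
+ * 'dst', returning the resulting binary length in bytes.
+ */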
+static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+	int j = DIV_ROUND_UP(len, 2), rc;
+
+	rc = hex2bin(dst, src, j);
+	if (rc < 0)
+		pr_debug("CHAP string contains non-hex digit symbols\n");
+
+	dst[j] = '\0';
+	return j;
+}
+
+static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+{
+	int i;
+
+	for (i = 0; i < src_len; i++) {
+		sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+	}
+}
+
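+/*
+ * Fill 'data' with 'length' random bytes for use as the CHAP challenge;
+ * each output byte is assembled from 3+3+2 bits taken from three
+ * separate get_random_bytes() calls.
+ */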
+static void chap_set_random(char *data, int length)
+{
+	long r;
+	unsigned n;
+
+	while (length > 0) {
+		get_random_bytes(&r, sizeof(long));
+		r = r ^ (r >> 8);
+		r = r ^ (r >> 4);
+		n = r & 0x7;
+
+		get_random_bytes(&r, sizeof(long));
+		r = r ^ (r >> 8);
+		r = r ^ (r >> 5);
+		n = (n << 3) | (r & 0x7);
+
+		get_random_bytes(&r, sizeof(long));
+		r = r ^ (r >> 8);
+		r = r ^ (r >> 5);
+		n = (n << 2) | (r & 0x3);
+
+		*data++ = n;
+		length--;
+	}
+}
+
+static void chap_gen_challenge(
+	struct iscsi_conn *conn,
+	int caller,
+	char *c_str,
+	unsigned int *c_len)
+{
+	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+
+	chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+				CHAP_CHALLENGE_LENGTH);
+	/*
+	 * Set CHAP_C, and copy the generated challenge into c_str.
+	 */
+	*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
+	*c_len += 1;
+
+	pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
+			challenge_asciihex);
+}
+
+
+static struct iscsi_chap *chap_server_open(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	const char *a_str,
+	char *aic_str,
+	unsigned int *aic_len)
+{
+	struct iscsi_chap *chap;
+
+	if (!(auth->naf_flags & NAF_USERID_SET) ||
+	    !(auth->naf_flags & NAF_PASSWORD_SET)) {
+		pr_err("CHAP user or password not set for"
+				" Initiator ACL\n");
+		return NULL;
+	}
+
+	conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
+	if (!conn->auth_protocol)
+		return NULL;
+
+	chap = conn->auth_protocol;
+	/*
+	 * We only support the MD5 digest algorithm presently.
+	 */
+	if (strncmp(a_str, "CHAP_A=5", 8)) {
+		pr_err("CHAP_A is not MD5.\n");
+		return NULL;
+	}
+	pr_debug("[server] Got CHAP_A=5\n");
+	/*
+	 * Send back CHAP_A set to MD5.
+	 */
+	*aic_len = sprintf(aic_str, "CHAP_A=5");
+	*aic_len += 1;
+	chap->digest_type = CHAP_DIGEST_MD5;
+	pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+	/*
+	 * Set Identifier.
+	 */
+	chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+	*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
+	*aic_len += 1;
+	pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
+	/*
+	 * Generate Challenge.
+	 */
+	chap_gen_challenge(conn, 1, aic_str, aic_len);
+
+	return chap;
+}
+
+static void chap_close(struct iscsi_conn *conn)
+{
+	kfree(conn->auth_protocol);
+	conn->auth_protocol = NULL;
+}
+
+static int chap_server_compute_md5(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *nr_in_ptr,
+	char *nr_out_ptr,
+	unsigned int *nr_out_len)
+{
+	char *endptr;
+	unsigned long id;
+	unsigned char id_as_uchar;
+	unsigned char digest[MD5_SIGNATURE_SIZE];
+	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
+	unsigned char identifier[10], *challenge = NULL;
+	unsigned char *challenge_binhex = NULL;
+	unsigned char client_digest[MD5_SIGNATURE_SIZE];
+	unsigned char server_digest[MD5_SIGNATURE_SIZE];
+	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+	size_t compare_len;
+	struct iscsi_chap *chap = conn->auth_protocol;
+	struct crypto_hash *tfm;
+	struct hash_desc desc;
+	struct scatterlist sg;
+	int auth_ret = -1, ret, challenge_len;
+
+	memset(identifier, 0, 10);
+	memset(chap_n, 0, MAX_CHAP_N_SIZE);
+	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
+	memset(digest, 0, MD5_SIGNATURE_SIZE);
+	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
+	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
+	memset(server_digest, 0, MD5_SIGNATURE_SIZE);
+
+	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+	if (!challenge) {
+		pr_err("Unable to allocate challenge buffer\n");
+		goto out;
+	}
+
+	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+	if (!challenge_binhex) {
+		pr_err("Unable to allocate challenge_binhex buffer\n");
+		goto out;
+	}
+	/*
+	 * Extract CHAP_N.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
+				&type) < 0) {
+		pr_err("Could not find CHAP_N.\n");
+		goto out;
+	}
+	if (type == HEX) {
+		pr_err("Could not find CHAP_N.\n");
+		goto out;
+	}
+
+	/* Include the terminating NULL in the compare */
+	compare_len = strlen(auth->userid) + 1;
+	if (strncmp(chap_n, auth->userid, compare_len) != 0) {
+		pr_err("CHAP_N values do not match!\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
+	/*
+	 * Extract CHAP_R.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
+				&type) < 0) {
+		pr_err("Could not find CHAP_R.\n");
+		goto out;
+	}
+	if (type != HEX) {
+		pr_err("Could not find CHAP_R.\n");
+		goto out;
+	}
+
+	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+
+	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("Unable to allocate struct crypto_hash\n");
+		goto out;
+	}
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	ret = crypto_hash_init(&desc);
+	if (ret < 0) {
+		pr_err("crypto_hash_init() failed\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
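+	/*
+	 * Per RFC 1994 the expected response is MD5(Identifier || secret ||
+	 * Challenge): hash the one-byte identifier, the node password, and
+	 * the previously generated challenge, in that order.
+	 */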
+	sg_init_one(&sg, &chap->id, 1);
+	ret = crypto_hash_update(&desc, &sg, 1);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for id\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, &auth->password, strlen(auth->password));
+	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for password\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
+	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for challenge\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	ret = crypto_hash_final(&desc, server_digest);
+	if (ret < 0) {
+		pr_err("crypto_hash_final() failed for server digest\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	crypto_free_hash(tfm);
+
+	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+	pr_debug("[server] MD5 Server Digest: %s\n", response);
+
+	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+		pr_debug("[server] MD5 Digests do not match!\n\n");
+		goto out;
+	} else
+		pr_debug("[server] MD5 Digests match, CHAP authentication"
+				" successful.\n\n");
+	/*
+	 * One way authentication has succeeded, return now if mutual
+	 * authentication is not enabled.
+	 */
+	if (!auth->authenticate_target) {
+		kfree(challenge);
+		kfree(challenge_binhex);
+		return 0;
+	}
+	/*
+	 * Get CHAP_I.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+		pr_err("Could not find CHAP_I.\n");
+		goto out;
+	}
+
+	if (type == HEX)
+		id = simple_strtoul(&identifier[2], &endptr, 0);
+	else
+		id = simple_strtoul(identifier, &endptr, 0);
+	if (id > 255) {
+		pr_err("chap identifier: %lu greater than 255\n", id);
+		goto out;
+	}
+	/*
+	 * RFC 1994 says the Identifier is no more than one octet (8 bits).
+	 */
+	pr_debug("[server] Got CHAP_I=%lu\n", id);
+	/*
+	 * Get CHAP_C.
+	 */
+	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
+			challenge, &type) < 0) {
+		pr_err("Could not find CHAP_C.\n");
+		goto out;
+	}
+
+	if (type != HEX) {
+		pr_err("Could not find CHAP_C.\n");
+		goto out;
+	}
+	pr_debug("[server] Got CHAP_C=%s\n", challenge);
+	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+				strlen(challenge));
+	if (!challenge_len) {
+		pr_err("Unable to convert incoming challenge\n");
+		goto out;
+	}
+	/*
+	 * During mutual authentication, the CHAP_C generated by the
+	 * initiator must not match the original CHAP_C generated by
+	 * the target.
+	 */
+	if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+		pr_err("initiator CHAP_C matches target CHAP_C, failing"
+		       " login attempt\n");
+		goto out;
+	}
+	/*
+	 * Generate CHAP_N and CHAP_R for mutual authentication.
+	 */
+	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("Unable to allocate struct crypto_hash\n");
+		goto out;
+	}
+	desc.tfm = tfm;
+	desc.flags = 0;
+
+	ret = crypto_hash_init(&desc);
+	if (ret < 0) {
+		pr_err("crypto_hash_init() failed\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	/* To handle both endiannesses */
+	id_as_uchar = id;
+	sg_init_one(&sg, &id_as_uchar, 1);
+	ret = crypto_hash_update(&desc, &sg, 1);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for id\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	sg_init_one(&sg, auth->password_mutual,
+				strlen(auth->password_mutual));
+	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for"
+				" password_mutual\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	/*
+	 * Feed the received challenge (already converted to binary above)
+	 * into the mutual authentication digest.
+	 */
+	sg_init_one(&sg, challenge_binhex, challenge_len);
+	ret = crypto_hash_update(&desc, &sg, challenge_len);
+	if (ret < 0) {
+		pr_err("crypto_hash_update() failed for ma challenge\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+
+	ret = crypto_hash_final(&desc, digest);
+	if (ret < 0) {
+		pr_err("crypto_hash_final() failed for ma digest\n");
+		crypto_free_hash(tfm);
+		goto out;
+	}
+	crypto_free_hash(tfm);
+	/*
+	 * Generate CHAP_N and CHAP_R.
+	 */
+	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
+	*nr_out_len += 1;
+	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
+	/*
+	 * Convert the response digest from binary to ascii hex.
+	 */
+	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+			response);
+	*nr_out_len += 1;
+	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
+	auth_ret = 0;
+out:
+	kfree(challenge);
+	kfree(challenge_binhex);
+	return auth_ret;
+}
+
+static int chap_got_response(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *nr_in_ptr,
+	char *nr_out_ptr,
+	unsigned int *nr_out_len)
+{
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	switch (chap->digest_type) {
+	case CHAP_DIGEST_MD5:
+		if (chap_server_compute_md5(conn, auth, nr_in_ptr,
+				nr_out_ptr, nr_out_len) < 0)
+			return -1;
+		return 0;
+	default:
+		pr_err("Unknown CHAP digest type %d!\n",
+				chap->digest_type);
+		return -1;
+	}
+}
+
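+/*
+ * Server-side CHAP state machine entry point.  Returns 0 when the
+ * challenge has been sent and more exchange is required, 1 when
+ * authentication has completed, and 2 on failure.
+ */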
+u32 chap_main_loop(
+	struct iscsi_conn *conn,
+	struct iscsi_node_auth *auth,
+	char *in_text,
+	char *out_text,
+	int *in_len,
+	int *out_len)
+{
+	struct iscsi_chap *chap = conn->auth_protocol;
+
+	if (!chap) {
+		chap = chap_server_open(conn, auth, in_text, out_text, out_len);
+		if (!chap)
+			return 2;
+		chap->chap_state = CHAP_STAGE_SERVER_AIC;
+		return 0;
+	} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
+		convert_null_to_semi(in_text, *in_len);
+		if (chap_got_response(conn, auth, in_text, out_text,
+				out_len) < 0) {
+			chap_close(conn);
+			return 2;
+		}
+		if (auth->authenticate_target)
+			chap->chap_state = CHAP_STAGE_SERVER_NR;
+		else
+			*out_len = 0;
+		chap_close(conn);
+		return 1;
+	}
+
+	return 2;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 0000000..2f463c0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
+#ifndef _ISCSI_CHAP_H_
+#define _ISCSI_CHAP_H_
+
+#define CHAP_DIGEST_MD5		5
+#define CHAP_DIGEST_SHA		6
+
+#define CHAP_CHALLENGE_LENGTH	16
+#define CHAP_CHALLENGE_STR_LEN	4096
+#define MAX_RESPONSE_LENGTH	64	/* sufficient for MD5 */
+#define	MAX_CHAP_N_SIZE		512
+
+#define MD5_SIGNATURE_SIZE	16	/* 16 bytes in a MD5 message digest */
+
+#define CHAP_STAGE_CLIENT_A	1
+#define CHAP_STAGE_SERVER_AIC	2
+#define CHAP_STAGE_CLIENT_NR	3
+#define CHAP_STAGE_CLIENT_NRIC	4
+#define CHAP_STAGE_SERVER_NR	5
+
+extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+				int *, int *);
+
+struct iscsi_chap {
+	unsigned char	digest_type;
+	unsigned char	id;
+	unsigned char	challenge[CHAP_CHALLENGE_LENGTH];
+	unsigned int	authenticate_target;
+	unsigned int	chap_state;
+} ____cacheline_aligned;
+
+#endif   /*** _ISCSI_CHAP_H_ ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 0000000..facb3ad
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1832 @@
+/*******************************************************************************
+ * This file contains the configfs implementation for iSCSI Target mode
+ * from the LIO-Target Project.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/configfs.h>
+#include <linux/export.h>
+#include <linux/inet.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_configfs.h"
+
+struct target_fabric_configfs *lio_target_fabric_configfs;
+
+struct lio_target_configfs_attribute {
+	struct configfs_attribute attr;
+	ssize_t (*show)(void *, char *);
+	ssize_t (*store)(void *, const char *, size_t);
+};
+
+struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
+	struct config_item *item,
+	struct iscsi_tiqn **tiqn_out)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+					struct se_portal_group, tpg_group);
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+	int ret;
+
+	if (!tpg) {
+		pr_err("Unable to locate struct iscsi_portal_group "
+			"pointer\n");
+		return NULL;
+	}
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return NULL;
+
+	*tiqn_out = tpg->tpg_tiqn;
+	return tpg;
+}
+
+/* Start items for lio_target_portal_cit */
+
+static ssize_t lio_target_np_show_sctp(
+	struct se_tpg_np *se_tpg_np,
+	char *page)
+{
+	struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+				struct iscsi_tpg_np, se_tpg_np);
+	struct iscsi_tpg_np *tpg_np_sctp;
+	ssize_t rb;
+
+	tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+	if (tpg_np_sctp)
+		rb = sprintf(page, "1\n");
+	else
+		rb = sprintf(page, "0\n");
+
+	return rb;
+}
+
+static ssize_t lio_target_np_store_sctp(
+	struct se_tpg_np *se_tpg_np,
+	const char *page,
+	size_t count)
+{
+	struct iscsi_np *np;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+				struct iscsi_tpg_np, se_tpg_np);
+	struct iscsi_tpg_np *tpg_np_sctp = NULL;
+	char *endptr;
+	u32 op;
+	int ret;
+
+	op = simple_strtoul(page, &endptr, 0);
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	tpg = tpg_np->tpg;
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+
+	if (op) {
+		/*
+		 * Use existing np->np_sockaddr for SCTP network portal reference
+		 */
+		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+					np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+			goto out;
+	} else {
+		tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+		if (!tpg_np_sctp)
+			goto out;
+
+		ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
+		if (ret < 0)
+			goto out;
+	}
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return -EINVAL;
+}
+
+TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_portal_attrs[] = {
+	&lio_target_np_sctp.attr,
+	NULL,
+};
+
+/* Stop items for lio_target_portal_cit */
+
+/* Start items for lio_target_np_cit */
+
+#define MAX_PORTAL_LEN		256
+
+struct se_tpg_np *lio_target_call_addnptotpg(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	char *str, *str2, *ip_str, *port_str;
+	struct __kernel_sockaddr_storage sockaddr;
+	struct sockaddr_in *sock_in;
+	struct sockaddr_in6 *sock_in6;
+	unsigned long port;
+	int ret;
+	char buf[MAX_PORTAL_LEN + 1];
+
+	if (strlen(name) > MAX_PORTAL_LEN) {
+		pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
+			(int)strlen(name), MAX_PORTAL_LEN);
+		return ERR_PTR(-EOVERFLOW);
+	}
+	memset(buf, 0, MAX_PORTAL_LEN + 1);
+	snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
+
+	memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+
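+	/*
+	 * A leading '[' marks an "[ipv6]:port" portal string; otherwise the
+	 * buffer is parsed as "ipv4:port".
+	 */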
+	str = strstr(buf, "[");
+	if (str) {
+		const char *end;
+
+		str2 = strstr(str, "]");
+		if (!str2) {
+			pr_err("Unable to locate trailing \"]\""
+				" in IPv6 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		str++; /* Skip over leading "[" */
+		*str2 = '\0'; /* Terminate the IPv6 address */
+		str2++; /* Skip over the "]" */
+		port_str = strstr(str2, ":");
+		if (!port_str) {
+			pr_err("Unable to locate \":port\""
+				" in IPv6 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		*port_str = '\0'; /* Terminate string for IP */
+		port_str++; /* Skip over ":" */
+
+		ret = strict_strtoul(port_str, 0, &port);
+		if (ret < 0) {
+			pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+			return ERR_PTR(ret);
+		}
+		sock_in6 = (struct sockaddr_in6 *)&sockaddr;
+		sock_in6->sin6_family = AF_INET6;
+		sock_in6->sin6_port = htons((unsigned short)port);
+		ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+				(void *)&sock_in6->sin6_addr.in6_u, -1, &end);
+		if (ret <= 0) {
+			pr_err("in6_pton returned: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else {
+		str = ip_str = &buf[0];
+		port_str = strstr(ip_str, ":");
+		if (!port_str) {
+			pr_err("Unable to locate \":port\""
+				" in IPv4 iSCSI network portal address\n");
+			return ERR_PTR(-EINVAL);
+		}
+		*port_str = '\0'; /* Terminate string for IP */
+		port_str++; /* Skip over ":" */
+
+		ret = strict_strtoul(port_str, 0, &port);
+		if (ret < 0) {
+			pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+			return ERR_PTR(ret);
+		}
+		sock_in = (struct sockaddr_in *)&sockaddr;
+		sock_in->sin_family = AF_INET;
+		sock_in->sin_port = htons((unsigned short)port);
+		sock_in->sin_addr.s_addr = in_aton(ip_str);
+	}
+	tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return ERR_PTR(-EINVAL);
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
+		" PORTAL: %s\n",
+		config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+		tpg->tpgt, name);
+	/*
+	 * Assume ISCSI_TCP by default.  Other network portals for other
+	 * iSCSI fabrics:
+	 *
+	 * Traditional iSCSI over SCTP (initial support)
+	 * iSER/TCP (TODO, hardware available)
+	 * iSER/SCTP (TODO, software emulation with osc-iwarp)
+	 * iSER/IB (TODO, hardware available)
+	 *
+	 * can be enabled with attributes under
+	 * /sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+	 *
+	 */
+	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+				ISCSI_TCP);
+	if (IS_ERR(tpg_np)) {
+		iscsit_put_tpg(tpg);
+		return ERR_CAST(tpg_np);
+	}
+	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
+
+	iscsit_put_tpg(tpg);
+	return &tpg_np->se_tpg_np;
+}
+
+static void lio_target_call_delnpfromtpg(
+	struct se_tpg_np *se_tpg_np)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np;
+	struct se_portal_group *se_tpg;
+	int ret;
+
+	tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
+	tpg = tpg_np->tpg;
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return;
+
+	se_tpg = &tpg->tpg_se_tpg;
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+		" PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+		tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+
+	ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+	if (ret < 0)
+		goto out;
+
+	pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
+out:
+	iscsit_put_tpg(tpg);
+}
+
+/* End items for lio_target_np_cit */
+
+/* Start items for lio_target_nacl_attrib_cit */
+
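+/*
+ * DEF_NACL_ATTRIB() generates the configfs show/store handlers for a
+ * per-initiator-node attribute: show reads the current value from
+ * ISCSI_NODE_ATTRIB(), store parses a u32 and applies it via the
+ * corresponding iscsit_na_<name>() setter.
+ */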
+#define DEF_NACL_ATTRIB(name)						\
+static ssize_t iscsi_nacl_attrib_show_##name(				\
+	struct se_node_acl *se_nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+					se_node_acl);			\
+									\
+	return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name);	\
+}									\
+									\
+static ssize_t iscsi_nacl_attrib_store_##name(				\
+	struct se_node_acl *se_nacl,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+					se_node_acl);			\
+	char *endptr;							\
+	u32 val;							\
+	int ret;							\
+									\
+	val = simple_strtoul(page, &endptr, 0);				\
+	ret = iscsit_na_##name(nacl, val);				\
+	if (ret < 0)							\
+		return ret;						\
+									\
+	return count;							\
+}
+
+#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout
+ */
+DEF_NACL_ATTRIB(dataout_timeout);
+NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout_retries
+ */
+DEF_NACL_ATTRIB(dataout_timeout_retries);
+NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_default_erl
+ */
+DEF_NACL_ATTRIB(default_erl);
+NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_timeout
+ */
+DEF_NACL_ATTRIB(nopin_timeout);
+NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_response_timeout
+ */
+DEF_NACL_ATTRIB(nopin_response_timeout);
+NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_pdu_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_pdu_offsets);
+NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_seq_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_seq_offsets);
+NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_r2t_offsets
+ */
+DEF_NACL_ATTRIB(random_r2t_offsets);
+NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
+	&iscsi_nacl_attrib_dataout_timeout.attr,
+	&iscsi_nacl_attrib_dataout_timeout_retries.attr,
+	&iscsi_nacl_attrib_default_erl.attr,
+	&iscsi_nacl_attrib_nopin_timeout.attr,
+	&iscsi_nacl_attrib_nopin_response_timeout.attr,
+	&iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
+	&iscsi_nacl_attrib_random_datain_seq_offsets.attr,
+	&iscsi_nacl_attrib_random_r2t_offsets.attr,
+	NULL,
+};
+
+/* End items for lio_target_nacl_attrib_cit */
+
+/* Start items for lio_target_nacl_auth_cit */
+
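+/*
+ * The auth attribute macros below generate CAP_SYS_ADMIN-guarded
+ * show/store handlers for the CHAP credentials; storing the literal
+ * string "NULL" clears the corresponding naf_flags bit, and mutual
+ * authentication is enabled once both the mutual userid and password
+ * are set.
+ */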
+#define __DEF_NACL_AUTH_STR(prefix, name, flags)			\
+static ssize_t __iscsi_##prefix##_show_##name(				\
+	struct iscsi_node_acl *nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+	return snprintf(page, PAGE_SIZE, "%s\n", auth->name);		\
+}									\
+									\
+static ssize_t __iscsi_##prefix##_store_##name(				\
+	struct iscsi_node_acl *nacl,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+									\
+	snprintf(auth->name, sizeof(auth->name), "%s", page);		\
+	if (!strncmp("NULL", auth->name, 4))				\
+		auth->naf_flags &= ~flags;				\
+	else								\
+		auth->naf_flags |= flags;				\
+									\
+	if ((auth->naf_flags & NAF_USERID_IN_SET) &&			\
+	    (auth->naf_flags & NAF_PASSWORD_IN_SET))			\
+		auth->authenticate_target = 1;				\
+	else								\
+		auth->authenticate_target = 0;				\
+									\
+	return count;							\
+}
+
+#define __DEF_NACL_AUTH_INT(prefix, name)				\
+static ssize_t __iscsi_##prefix##_show_##name(				\
+	struct iscsi_node_acl *nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_node_auth *auth = &nacl->node_auth;		\
+									\
+	if (!capable(CAP_SYS_ADMIN))					\
+		return -EPERM;						\
+									\
+	return snprintf(page, PAGE_SIZE, "%d\n", auth->name);		\
+}
+
+#define DEF_NACL_AUTH_STR(name, flags)					\
+	__DEF_NACL_AUTH_STR(nacl_auth, name, flags)			\
+static ssize_t iscsi_nacl_auth_show_##name(				\
+	struct se_node_acl *nacl,					\
+	char *page)							\
+{									\
+	return __iscsi_nacl_auth_show_##name(container_of(nacl,		\
+			struct iscsi_node_acl, se_node_acl), page);		\
+}									\
+static ssize_t iscsi_nacl_auth_store_##name(				\
+	struct se_node_acl *nacl,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	return __iscsi_nacl_auth_store_##name(container_of(nacl,	\
+			struct iscsi_node_acl, se_node_acl), page, count);	\
+}
+
+#define DEF_NACL_AUTH_INT(name)						\
+	__DEF_NACL_AUTH_INT(nacl_auth, name)				\
+static ssize_t iscsi_nacl_auth_show_##name(				\
+	struct se_node_acl *nacl,					\
+	char *page)							\
+{									\
+	return __iscsi_nacl_auth_show_##name(container_of(nacl,		\
+			struct iscsi_node_acl, se_node_acl), page);		\
+}
+
+#define AUTH_ATTR(_name, _mode)	TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
+#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
+
+/*
+ * One-way authentication userid
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_NACL_AUTH_INT(authenticate_target);
+AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
+	&iscsi_nacl_auth_userid.attr,
+	&iscsi_nacl_auth_password.attr,
+	&iscsi_nacl_auth_authenticate_target.attr,
+	&iscsi_nacl_auth_userid_mutual.attr,
+	&iscsi_nacl_auth_password_mutual.attr,
+	NULL,
+};
+
+/* End items for lio_target_nacl_auth_cit */
+
+/* Start items for lio_target_nacl_param_cit */
+
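+/*
+ * DEF_NACL_PARAM() generates a read-only configfs handler that reports
+ * the negotiated value of a session parameter for the initiator's
+ * active session, or "No Active iSCSI Session" when none is logged in.
+ */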
+#define DEF_NACL_PARAM(name)						\
+static ssize_t iscsi_nacl_param_show_##name(				\
+	struct se_node_acl *se_nacl,					\
+	char *page)							\
+{									\
+	struct iscsi_session *sess;					\
+	struct se_session *se_sess;					\
+	ssize_t rb;							\
+									\
+	spin_lock_bh(&se_nacl->nacl_sess_lock);				\
+	se_sess = se_nacl->nacl_sess;					\
+	if (!se_sess) {							\
+		rb = snprintf(page, PAGE_SIZE,				\
+			"No Active iSCSI Session\n");			\
+	} else {							\
+		sess = se_sess->fabric_sess_ptr;			\
+		rb = snprintf(page, PAGE_SIZE, "%u\n",			\
+			(u32)sess->sess_ops->name);			\
+	}								\
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);			\
+									\
+	return rb;							\
+}
+
+#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
+
+DEF_NACL_PARAM(MaxConnections);
+NACL_PARAM_ATTR(MaxConnections);
+
+DEF_NACL_PARAM(InitialR2T);
+NACL_PARAM_ATTR(InitialR2T);
+
+DEF_NACL_PARAM(ImmediateData);
+NACL_PARAM_ATTR(ImmediateData);
+
+DEF_NACL_PARAM(MaxBurstLength);
+NACL_PARAM_ATTR(MaxBurstLength);
+
+DEF_NACL_PARAM(FirstBurstLength);
+NACL_PARAM_ATTR(FirstBurstLength);
+
+DEF_NACL_PARAM(DefaultTime2Wait);
+NACL_PARAM_ATTR(DefaultTime2Wait);
+
+DEF_NACL_PARAM(DefaultTime2Retain);
+NACL_PARAM_ATTR(DefaultTime2Retain);
+
+DEF_NACL_PARAM(MaxOutstandingR2T);
+NACL_PARAM_ATTR(MaxOutstandingR2T);
+
+DEF_NACL_PARAM(DataPDUInOrder);
+NACL_PARAM_ATTR(DataPDUInOrder);
+
+DEF_NACL_PARAM(DataSequenceInOrder);
+NACL_PARAM_ATTR(DataSequenceInOrder);
+
+DEF_NACL_PARAM(ErrorRecoveryLevel);
+NACL_PARAM_ATTR(ErrorRecoveryLevel);
+
+static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
+	&iscsi_nacl_param_MaxConnections.attr,
+	&iscsi_nacl_param_InitialR2T.attr,
+	&iscsi_nacl_param_ImmediateData.attr,
+	&iscsi_nacl_param_MaxBurstLength.attr,
+	&iscsi_nacl_param_FirstBurstLength.attr,
+	&iscsi_nacl_param_DefaultTime2Wait.attr,
+	&iscsi_nacl_param_DefaultTime2Retain.attr,
+	&iscsi_nacl_param_MaxOutstandingR2T.attr,
+	&iscsi_nacl_param_DataPDUInOrder.attr,
+	&iscsi_nacl_param_DataSequenceInOrder.attr,
+	&iscsi_nacl_param_ErrorRecoveryLevel.attr,
+	NULL,
+};
+
+/* End items for lio_target_nacl_param_cit */
+
+/* Start items for lio_target_acl_cit */
+
+static ssize_t lio_target_nacl_show_info(
+	struct se_node_acl *se_nacl,
+	char *page)
+{
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	struct se_session *se_sess;
+	ssize_t rb = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (!se_sess) {
+		rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+			" Endpoint: %s\n", se_nacl->initiatorname);
+	} else {
+		sess = se_sess->fabric_sess_ptr;
+
+		if (sess->sess_ops->InitiatorName)
+			rb += sprintf(page+rb, "InitiatorName: %s\n",
+				sess->sess_ops->InitiatorName);
+		if (sess->sess_ops->InitiatorAlias)
+			rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+				sess->sess_ops->InitiatorAlias);
+
+		rb += sprintf(page+rb, "LIO Session ID: %u   "
+			"ISID: 0x%02x %02x %02x %02x %02x %02x  "
+			"TSIH: %hu  ", sess->sid,
+			sess->isid[0], sess->isid[1], sess->isid[2],
+			sess->isid[3], sess->isid[4], sess->isid[5],
+			sess->tsih);
+		rb += sprintf(page+rb, "SessionType: %s\n",
+				(sess->sess_ops->SessionType) ?
+				"Discovery" : "Normal");
+		rb += sprintf(page+rb, "Session State: ");
+		switch (sess->session_state) {
+		case TARG_SESS_STATE_FREE:
+			rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+			break;
+		case TARG_SESS_STATE_ACTIVE:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+			break;
+		case TARG_SESS_STATE_LOGGED_IN:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+			break;
+		case TARG_SESS_STATE_FAILED:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+			break;
+		case TARG_SESS_STATE_IN_CONTINUE:
+			rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+			break;
+		default:
+			rb += sprintf(page+rb, "ERROR: Unknown Session"
+					" State!\n");
+			break;
+		}
+
+		rb += sprintf(page+rb, "---------------------[iSCSI Session"
+				" Values]-----------------------\n");
+		rb += sprintf(page+rb, "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN"
+				"  :  MaxCmdSN  :     ITT    :     TTT\n");
+		rb += sprintf(page+rb, " 0x%08x   0x%08x   0x%08x   0x%08x"
+				"   0x%08x   0x%08x\n",
+			sess->cmdsn_window,
+			(sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
+			sess->exp_cmd_sn, sess->max_cmd_sn,
+			sess->init_task_tag, sess->targ_xfer_tag);
+		rb += sprintf(page+rb, "----------------------[iSCSI"
+				" Connections]-------------------------\n");
+
+		spin_lock(&sess->conn_lock);
+		list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+			rb += sprintf(page+rb, "CID: %hu  Connection"
+					" State: ", conn->cid);
+			switch (conn->conn_state) {
+			case TARG_CONN_STATE_FREE:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_FREE\n");
+				break;
+			case TARG_CONN_STATE_XPT_UP:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_XPT_UP\n");
+				break;
+			case TARG_CONN_STATE_IN_LOGIN:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_IN_LOGIN\n");
+				break;
+			case TARG_CONN_STATE_LOGGED_IN:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_LOGGED_IN\n");
+				break;
+			case TARG_CONN_STATE_IN_LOGOUT:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_IN_LOGOUT\n");
+				break;
+			case TARG_CONN_STATE_LOGOUT_REQUESTED:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+				break;
+			case TARG_CONN_STATE_CLEANUP_WAIT:
+				rb += sprintf(page+rb,
+					"TARG_CONN_STATE_CLEANUP_WAIT\n");
+				break;
+			default:
+				rb += sprintf(page+rb,
+					"ERROR: Unknown Connection State!\n");
+				break;
+			}
+
+			rb += sprintf(page+rb, "   Address %s %s", conn->login_ip,
+				(conn->network_transport == ISCSI_TCP) ?
+				"TCP" : "SCTP");
+			rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
+				conn->stat_sn);
+		}
+		spin_unlock(&sess->conn_lock);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return rb;
+}
+
+TF_NACL_BASE_ATTR_RO(lio_target, info);
+
+static ssize_t lio_target_nacl_show_cmdsn_depth(
+	struct se_node_acl *se_nacl,
+	char *page)
+{
+	return sprintf(page, "%u\n", se_nacl->queue_depth);
+}
+
+static ssize_t lio_target_nacl_store_cmdsn_depth(
+	struct se_node_acl *se_nacl,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	struct config_item *acl_ci, *tpg_ci, *wwn_ci;
+	char *endptr;
+	u32 cmdsn_depth = 0;
+	int ret;
+
+	cmdsn_depth = simple_strtoul(page, &endptr, 0);
+	if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+		pr_err("Passed cmdsn_depth: %u exceeds"
+			" TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
+			TA_DEFAULT_CMDSN_DEPTH_MAX);
+		return -EINVAL;
+	}
+	acl_ci = &se_nacl->acl_group.cg_item;
+	if (!acl_ci) {
+		pr_err("Unable to locate acl_ci\n");
+		return -EINVAL;
+	}
+	tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
+	if (!tpg_ci) {
+		pr_err("Unable to locate tpg_ci\n");
+		return -EINVAL;
+	}
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	if (!wwn_ci) {
+		pr_err("Unable to locate config_item wwn_ci\n");
+		return -EINVAL;
+	}
+
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+	/*
+	 * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
+	 */
+	ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
+				config_item_name(acl_ci), cmdsn_depth, 1);
+
+	pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+		" InitiatorName: %s\n", config_item_name(wwn_ci),
+		config_item_name(tpg_ci), cmdsn_depth,
+		config_item_name(acl_ci));
+
+	iscsit_put_tpg(tpg);
+	return (!ret) ? count : (ssize_t)ret;
+}
+
+TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
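+
+/*
+ * Illustrative runtime tuning of the per-NodeACL queue depth, assuming the
+ * standard configfs layout; the write is handled by
+ * lio_target_nacl_store_cmdsn_depth() above:
+ *
+ *   echo 64 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/acls/<initiator-iqn>/cmdsn_depth
+ */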
+
+static struct configfs_attribute *lio_target_initiator_attrs[] = {
+	&lio_target_nacl_info.attr,
+	&lio_target_nacl_cmdsn_depth.attr,
+	NULL,
+};
+
+static struct se_node_acl *lio_tpg_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct iscsi_node_acl *acl;
+
+	acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
+	if (!acl) {
+		pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
+		return NULL;
+	}
+
+	return &acl->se_node_acl;
+}
+
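+/*
+ * Called via fabric_make_nodeacl when userspace creates an initiator ACL
+ * directory, e.g. (illustrative):
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/acls/<initiator-iqn>
+ */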
+static struct se_node_acl *lio_target_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct config_group *stats_cg;
+	struct iscsi_node_acl *acl;
+	struct se_node_acl *se_nacl_new, *se_nacl;
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	u32 cmdsn_depth;
+
+	se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
+	if (!se_nacl_new)
+		return ERR_PTR(-ENOMEM);
+
+	cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, cmdsn_depth);
+	if (IS_ERR(se_nacl))
+		return se_nacl;
+
+	acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
+	stats_cg = &se_nacl->acl_fabric_stat_group;
+
+	stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!stats_cg->default_groups) {
+		pr_err("Unable to allocate memory for"
+				" stats_cg->default_groups\n");
+		core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+		kfree(acl);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+	stats_cg->default_groups[1] = NULL;
+	config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+			"iscsi_sess_stats", &iscsi_stat_sess_cit);
+
+	return se_nacl;
+}
+
+static void lio_target_drop_nodeacl(
+	struct se_node_acl *se_nacl)
+{
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct iscsi_node_acl *acl = container_of(se_nacl,
+			struct iscsi_node_acl, se_node_acl);
+	struct config_item *df_item;
+	struct config_group *stats_cg;
+	int i;
+
+	stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+	for (i = 0; stats_cg->default_groups[i]; i++) {
+		df_item = &stats_cg->default_groups[i]->cg_item;
+		stats_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(stats_cg->default_groups);
+
+	core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+	kfree(acl);
+}
+
+/* End items for lio_target_acl_cit */
+
+/* Start items for lio_target_tpg_attrib_cit */
+
+#define DEF_TPG_ATTRIB(name)						\
+									\
+static ssize_t iscsi_tpg_attrib_show_##name(				\
+	struct se_portal_group *se_tpg,				\
+	char *page)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);	\
+	ssize_t rb;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name);	\
+	iscsit_put_tpg(tpg);						\
+	return rb;							\
+}									\
+									\
+static ssize_t iscsi_tpg_attrib_store_##name(				\
+	struct se_portal_group *se_tpg,				\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);	\
+	char *endptr;							\
+	u32 val;							\
+	int ret;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	val = simple_strtoul(page, &endptr, 0);				\
+	ret = iscsit_ta_##name(tpg, val);				\
+	if (ret < 0)							\
+		goto out;						\
+									\
+	iscsit_put_tpg(tpg);						\
+	return count;							\
+out:									\
+	iscsit_put_tpg(tpg);						\
+	return ret;							\
+}
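+
+/*
+ * Writes to the generated attrib/<name> files funnel into the matching
+ * iscsit_ta_<name>() helper.  Illustrative usage, assuming the standard
+ * configfs mount point:
+ *
+ *   echo 0 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/attrib/authentication
+ */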
+
+#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
+
+/*
+ * Define iscsi_tpg_attrib_s_authentication
+ */
+DEF_TPG_ATTRIB(authentication);
+TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_login_timeout
+ */
+DEF_TPG_ATTRIB(login_timeout);
+TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_netif_timeout
+ */
+DEF_TPG_ATTRIB(netif_timeout);
+TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_generate_node_acls
+ */
+DEF_TPG_ATTRIB(generate_node_acls);
+TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_cmdsn_depth
+ */
+DEF_TPG_ATTRIB(default_cmdsn_depth);
+TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_TPG_ATTRIB(cache_dynamic_acls);
+TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_TPG_ATTRIB(demo_mode_write_protect);
+TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_TPG_ATTRIB(prod_mode_write_protect);
+TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
+	&iscsi_tpg_attrib_authentication.attr,
+	&iscsi_tpg_attrib_login_timeout.attr,
+	&iscsi_tpg_attrib_netif_timeout.attr,
+	&iscsi_tpg_attrib_generate_node_acls.attr,
+	&iscsi_tpg_attrib_default_cmdsn_depth.attr,
+	&iscsi_tpg_attrib_cache_dynamic_acls.attr,
+	&iscsi_tpg_attrib_demo_mode_write_protect.attr,
+	&iscsi_tpg_attrib_prod_mode_write_protect.attr,
+	NULL,
+};
+
+/* End items for lio_target_tpg_attrib_cit */
+
+/* Start items for lio_target_tpg_param_cit */
+
+#define DEF_TPG_PARAM(name)						\
+static ssize_t iscsi_tpg_param_show_##name(				\
+	struct se_portal_group *se_tpg,					\
+	char *page)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_param *param;					\
+	ssize_t rb;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	param = iscsi_find_param_from_key(__stringify(name),		\
+				tpg->param_list);			\
+	if (!param) {							\
+		iscsit_put_tpg(tpg);					\
+		return -EINVAL;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
+									\
+	iscsit_put_tpg(tpg);						\
+	return rb;							\
+}									\
+static ssize_t iscsi_tpg_param_store_##name(				\
+	struct se_portal_group *se_tpg,				\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	char *buf;							\
+	int ret;							\
+									\
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);				\
+	if (!buf)							\
+		return -ENOMEM;						\
+	snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);	\
+	buf[strlen(buf)-1] = '\0'; /* Kill newline */			\
+									\
+	if (iscsit_get_tpg(tpg) < 0) {					\
+		kfree(buf);						\
+		return -EINVAL;						\
+	}								\
+									\
+	ret = iscsi_change_param_value(buf, tpg->param_list, 1);	\
+	if (ret < 0)							\
+		goto out;						\
+									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return count;							\
+out:									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return -EINVAL;						\
+}
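+
+/*
+ * The generated param/<name> files accept the plain iSCSI key value; the
+ * store path above rewrites it as "<key>=<value>" before calling
+ * iscsi_change_param_value().  Illustrative usage, assuming the standard
+ * configfs mount point:
+ *
+ *   echo "CRC32C,None" > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/param/HeaderDigest
+ */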
+
+#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
+
+DEF_TPG_PARAM(AuthMethod);
+TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(HeaderDigest);
+TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataDigest);
+TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxConnections);
+TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(TargetAlias);
+TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(InitialR2T);
+TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ImmediateData);
+TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxRecvDataSegmentLength);
+TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxBurstLength);
+TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(FirstBurstLength);
+TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Wait);
+TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Retain);
+TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxOutstandingR2T);
+TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataPDUInOrder);
+TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataSequenceInOrder);
+TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ErrorRecoveryLevel);
+TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarker);
+TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarker);
+TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarkInt);
+TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarkInt);
+TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
+	&iscsi_tpg_param_AuthMethod.attr,
+	&iscsi_tpg_param_HeaderDigest.attr,
+	&iscsi_tpg_param_DataDigest.attr,
+	&iscsi_tpg_param_MaxConnections.attr,
+	&iscsi_tpg_param_TargetAlias.attr,
+	&iscsi_tpg_param_InitialR2T.attr,
+	&iscsi_tpg_param_ImmediateData.attr,
+	&iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
+	&iscsi_tpg_param_MaxBurstLength.attr,
+	&iscsi_tpg_param_FirstBurstLength.attr,
+	&iscsi_tpg_param_DefaultTime2Wait.attr,
+	&iscsi_tpg_param_DefaultTime2Retain.attr,
+	&iscsi_tpg_param_MaxOutstandingR2T.attr,
+	&iscsi_tpg_param_DataPDUInOrder.attr,
+	&iscsi_tpg_param_DataSequenceInOrder.attr,
+	&iscsi_tpg_param_ErrorRecoveryLevel.attr,
+	&iscsi_tpg_param_IFMarker.attr,
+	&iscsi_tpg_param_OFMarker.attr,
+	&iscsi_tpg_param_IFMarkInt.attr,
+	&iscsi_tpg_param_OFMarkInt.attr,
+	NULL,
+};
+
+/* End items for lio_target_tpg_param_cit */
+
+/* Start items for lio_target_tpg_cit */
+
+static ssize_t lio_target_tpg_show_enable(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	ssize_t len;
+
+	spin_lock(&tpg->tpg_state_lock);
+	len = sprintf(page, "%d\n",
+			(tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
+	spin_unlock(&tpg->tpg_state_lock);
+
+	return len;
+}
+
+static ssize_t lio_target_tpg_store_enable(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+	char *endptr;
+	u32 op;
+	int ret = 0;
+
+	op = simple_strtoul(page, &endptr, 0);
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for tpg_enable: %u\n", op);
+		return -EINVAL;
+	}
+
+	ret = iscsit_get_tpg(tpg);
+	if (ret < 0)
+		return -EINVAL;
+
+	if (op) {
+		ret = iscsit_tpg_enable_portal_group(tpg);
+		if (ret < 0)
+			goto out;
+	} else {
+		/*
+		 * iscsit_tpg_disable_portal_group() assumes force=1
+		 */
+		ret = iscsit_tpg_disable_portal_group(tpg, 1);
+		if (ret < 0)
+			goto out;
+	}
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
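+
+/*
+ * Illustrative usage: a TPG is brought online (or taken offline) by writing
+ * 1 or 0 to its enable attribute, handled by lio_target_tpg_store_enable():
+ *
+ *   echo 1 > /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1/enable
+ */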
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+	&lio_target_tpg_enable.attr,
+	NULL,
+};
+
+/* End items for lio_target_tpg_cit */
+
+/* Start items for lio_target_tiqn_cit */
+
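+/*
+ * Called via fabric_make_tpg when userspace creates a "tpgt_<N>" directory
+ * under the target IQN group, e.g. (illustrative):
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/<target-iqn>/tpgt_1
+ */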
+struct se_portal_group *lio_target_tiqn_addtpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+	char *tpgt_str, *end_ptr;
+	int ret = 0;
+	unsigned short int tpgt;
+
+	tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+	/*
+	 * Only tpgt_# directory groups can be created below
+	 * target/iscsi/iqn.superturodiskarry/
+	 */
+	tpgt_str = strstr(name, "tpgt_");
+	if (!tpgt_str) {
+		pr_err("Unable to locate \"tpgt_#\" directory"
+				" group\n");
+		return NULL;
+	}
+	tpgt_str += 5; /* Skip ahead of "tpgt_" */
+	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+	tpg = iscsit_alloc_portal_group(tiqn, tpgt);
+	if (!tpg)
+		return NULL;
+
+	ret = core_tpg_register(
+			&lio_target_fabric_configfs->tf_ops,
+			wwn, &tpg->tpg_se_tpg, tpg,
+			TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0)
+		return NULL;
+
+	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+	if (ret != 0)
+		goto out;
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
+			name);
+	return &tpg->tpg_se_tpg;
+out:
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+	kfree(tpg);
+	return NULL;
+}
+
+void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tiqn *tiqn;
+
+	tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+	tiqn = tpg->tpg_tiqn;
+	/*
+	 * iscsit_tpg_del_portal_group() assumes force=1
+	 */
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
+	iscsit_tpg_del_portal_group(tiqn, tpg, 1);
+}
+
+/* End items for lio_target_tiqn_cit */
+
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
+
+static ssize_t lio_target_wwn_show_attr_lio_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+}
+
+TF_WWN_ATTR_RO(lio_target, lio_version);
+
+static struct configfs_attribute *lio_target_wwn_attrs[] = {
+	&lio_target_wwn_lio_version.attr,
+	NULL,
+};
+
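+/*
+ * Called via fabric_make_wwn when userspace creates a target IQN directory,
+ * e.g. (illustrative IQN):
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.example:target0
+ */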
+struct se_wwn *lio_target_call_coreaddtiqn(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct config_group *stats_cg;
+	struct iscsi_tiqn *tiqn;
+
+	tiqn = iscsit_add_tiqn((unsigned char *)name);
+	if (IS_ERR(tiqn))
+		return ERR_CAST(tiqn);
+	/*
+	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
+	 */
+	stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+
+	stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+				GFP_KERNEL);
+	if (!stats_cg->default_groups) {
+		pr_err("Unable to allocate memory for"
+				" stats_cg->default_groups\n");
+		iscsit_del_tiqn(tiqn);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
+	stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
+	stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
+	stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
+	stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+	stats_cg->default_groups[5] = NULL;
+	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+			"iscsi_instance", &iscsi_stat_instance_cit);
+	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+			"iscsi_sess_err", &iscsi_stat_sess_err_cit);
+	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+			"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+			"iscsi_login_stats", &iscsi_stat_login_cit);
+	config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+			"iscsi_logout_stats", &iscsi_stat_logout_cit);
+
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+	pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+			" %s\n", name);
+	return &tiqn->tiqn_wwn;
+}
+
+void lio_target_call_coredeltiqn(
+	struct se_wwn *wwn)
+{
+	struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+	struct config_item *df_item;
+	struct config_group *stats_cg;
+	int i;
+
+	stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+	for (i = 0; stats_cg->default_groups[i]; i++) {
+		df_item = &stats_cg->default_groups[i]->cg_item;
+		stats_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(stats_cg->default_groups);
+
+	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
+			tiqn->tiqn);
+	iscsit_del_tiqn(tiqn);
+}
+
+/* End LIO-Target TIQN struct config_item lio_target_cit */
+
+/* Start lio_target_discovery_auth_cit */
+
+#define DEF_DISC_AUTH_STR(name, flags)					\
+	__DEF_NACL_AUTH_STR(disc, name, flags)				\
+static ssize_t iscsi_disc_show_##name(					\
+	struct target_fabric_configfs *tf,				\
+	char *page)							\
+{									\
+	return __iscsi_disc_show_##name(&iscsit_global->discovery_acl,	\
+		page);							\
+}									\
+static ssize_t iscsi_disc_store_##name(					\
+	struct target_fabric_configfs *tf,				\
+	const char *page,						\
+	size_t count)							\
+{									\
+	return __iscsi_disc_store_##name(&iscsit_global->discovery_acl,	\
+		page, count);						\
+}
+
+#define DEF_DISC_AUTH_INT(name)						\
+	__DEF_NACL_AUTH_INT(disc, name)					\
+static ssize_t iscsi_disc_show_##name(					\
+	struct target_fabric_configfs *tf,				\
+	char *page)							\
+{									\
+	return __iscsi_disc_show_##name(&iscsit_global->discovery_acl,	\
+			page);						\
+}
+
+#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
+#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
+
+/*
+ * One-way authentication userid
+ */
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_DISC_AUTH_INT(authenticate_target);
+DISC_AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+/*
+ * enforce_discovery_auth
+ */
+static ssize_t iscsi_disc_show_enforce_discovery_auth(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
+
+	return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+}
+
+static ssize_t iscsi_disc_store_enforce_discovery_auth(
+	struct target_fabric_configfs *tf,
+	const char *page,
+	size_t count)
+{
+	struct iscsi_param *param;
+	struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
+	char *endptr;
+	u32 op;
+
+	op = simple_strtoul(page, &endptr, 0);
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for enforce_discovery_auth:"
+				" %u\n", op);
+		return -EINVAL;
+	}
+
+	if (!discovery_tpg) {
+		pr_err("iscsit_global->discovery_tpg is NULL\n");
+		return -EINVAL;
+	}
+
+	param = iscsi_find_param_from_key(AUTHMETHOD,
+				discovery_tpg->param_list);
+	if (!param)
+		return -EINVAL;
+
+	if (op) {
+		/*
+		 * Reset the AuthMethod key to CHAP.
+		 */
+		if (iscsi_update_param_value(param, CHAP) < 0)
+			return -EINVAL;
+
+		discovery_tpg->tpg_attrib.authentication = 1;
+		iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
+		pr_debug("LIO-CORE[0] Successfully enabled"
+			" authentication enforcement for iSCSI"
+			" Discovery TPG\n");
+	} else {
+		/*
+		 * Reset the AuthMethod key to CHAP,None
+		 */
+		if (iscsi_update_param_value(param, "CHAP,None") < 0)
+			return -EINVAL;
+
+		discovery_tpg->tpg_attrib.authentication = 0;
+		iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
+		pr_debug("LIO-CORE[0] Successfully disabled"
+			" authentication enforcement for iSCSI"
+			" Discovery TPG\n");
+	}
+
+	return count;
+}
+
+DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
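+
+/*
+ * Illustrative usage: discovery authentication is controlled from the
+ * fabric-level discovery_auth group (assuming the standard configfs layout):
+ *
+ *   echo 1 > /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+ */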
+
+static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
+	&iscsi_disc_userid.attr,
+	&iscsi_disc_password.attr,
+	&iscsi_disc_authenticate_target.attr,
+	&iscsi_disc_userid_mutual.attr,
+	&iscsi_disc_password_mutual.attr,
+	&iscsi_disc_enforce_discovery_auth.attr,
+	NULL,
+};
+
+/* End lio_target_discovery_auth_cit */
+
+/* Start functions for target_core_fabric_ops */
+
+static char *iscsi_get_fabric_name(void)
+{
+	return "iSCSI";
+}
+
+static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	return cmd->init_task_tag;
+}
+
+static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	return cmd->i_state;
+}
+
+static u32 lio_sess_get_index(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+	return sess->session_index;
+}
+
+static u32 lio_sess_get_initiator_sid(
+	struct se_session *se_sess,
+	unsigned char *buf,
+	u32 size)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	/*
+	 * iSCSI Initiator Session Identifier from RFC-3720.
+	 */
+	return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
+		sess->isid[0], sess->isid[1], sess->isid[2],
+		sess->isid[3], sess->isid[4], sess->isid[5]);
+}
+
+static int lio_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
+static int lio_write_pending(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	if (!cmd->immediate_data && !cmd->unsolicited_data)
+		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+
+	return 0;
+}
+
+static int lio_write_pending_status(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	int ret;
+
+	spin_lock_bh(&cmd->istate_lock);
+	ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
+	spin_unlock_bh(&cmd->istate_lock);
+
+	return ret;
+}
+
+static int lio_queue_status(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_STATUS;
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
+static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	unsigned char *buffer = se_cmd->sense_buffer;
+	/*
+	 * From RFC-3720 10.4.7.  Data Segment - Sense and Response Data Segment
+	 * 16-bit SenseLength.
+	 */
+	buffer[0] = ((sense_length >> 8) & 0xff);
+	buffer[1] = (sense_length & 0xff);
+	/*
+	 * Return two byte offset into allocated sense_buffer.
+	 */
+	return 2;
+}
+
+static u16 lio_get_fabric_sense_len(void)
+{
+	/*
+	 * Return two byte offset into allocated sense_buffer.
+	 */
+	return 2;
+}
+
+static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return &tpg->tpg_tiqn->tiqn[0];
+}
+
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return tpg->tpgt;
+}
+
+static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+}
+
+static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int lio_tpg_check_demo_mode_write_protect(
+	struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int lio_tpg_check_prod_mode_write_protect(
+	struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static void lio_tpg_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_acl)
+{
+	struct iscsi_node_acl *acl = container_of(se_acl,
+				struct iscsi_node_acl, se_node_acl);
+	kfree(acl);
+}
+
+/*
+ * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+ *
+ * Also, this function calls iscsit_inc_session_usage_count() on the
+ * struct iscsi_session in question.
+ */
+static int lio_tpg_shutdown_session(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+	spin_lock(&sess->conn_lock);
+	if (atomic_read(&sess->session_fall_back_to_erl0) ||
+	    atomic_read(&sess->session_logout) ||
+	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+		spin_unlock(&sess->conn_lock);
+		return 0;
+	}
+	atomic_set(&sess->session_reinstatement, 1);
+	spin_unlock(&sess->conn_lock);
+
+	iscsit_stop_time2retain_timer(sess);
+	iscsit_stop_session(sess, 1, 1);
+
+	return 1;
+}
+
+/*
+ * Calls iscsit_dec_session_usage_count() as inverse of
+ * lio_tpg_shutdown_session()
+ */
+static void lio_tpg_close_session(struct se_session *se_sess)
+{
+	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+	/*
+	 * If the iSCSI Session for the iSCSI Initiator Node exists,
+	 * forcefully shut down the iSCSI NEXUS.
+	 */
+	iscsit_close_session(sess);
+}
+
+static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return tpg->tpg_tiqn->tiqn_index;
+}
+
+static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+	struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
+				se_node_acl);
+
+	ISCSI_NODE_ATTRIB(acl)->nacl = acl;
+	iscsit_set_default_node_attribues(acl);
+}
+
+static void lio_release_cmd(struct se_cmd *se_cmd)
+{
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	iscsit_release_cmd(cmd);
+}
+
+/* End functions for target_core_fabric_ops */
+
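+/*
+ * Registers the "iscsi" fabric with the generic target core; expected to be
+ * called once from the module init path.  The tf_ops table wired up below is
+ * how target_core_mod calls back into this fabric driver.
+ */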
+int iscsi_target_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	lio_target_fabric_configfs = NULL;
+	fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
+	if (IS_ERR(fabric)) {
+		pr_err("target_fabric_configfs_init() for"
+				" LIO-Target failed!\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Set up the fabric API of function pointers used by target_core_mod.
+	 */
+	fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
+	fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
+	fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
+	fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
+	fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
+	fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
+	fabric->tf_ops.tpg_get_pr_transport_id_len =
+				&iscsi_get_pr_transport_id_len;
+	fabric->tf_ops.tpg_parse_pr_out_transport_id =
+				&iscsi_parse_pr_out_transport_id;
+	fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
+	fabric->tf_ops.tpg_check_demo_mode_cache =
+				&lio_tpg_check_demo_mode_cache;
+	fabric->tf_ops.tpg_check_demo_mode_write_protect =
+				&lio_tpg_check_demo_mode_write_protect;
+	fabric->tf_ops.tpg_check_prod_mode_write_protect =
+				&lio_tpg_check_prod_mode_write_protect;
+	fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
+	fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
+	fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+	fabric->tf_ops.release_cmd = &lio_release_cmd;
+	fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
+	fabric->tf_ops.close_session = &lio_tpg_close_session;
+	fabric->tf_ops.sess_get_index = &lio_sess_get_index;
+	fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
+	fabric->tf_ops.write_pending = &lio_write_pending;
+	fabric->tf_ops.write_pending_status = &lio_write_pending_status;
+	fabric->tf_ops.set_default_node_attributes =
+				&lio_set_default_node_attributes;
+	fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
+	fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
+	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
+	fabric->tf_ops.queue_status = &lio_queue_status;
+	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+	fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
+	fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
+	fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
+	fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
+	fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
+	fabric->tf_ops.fabric_post_link	= NULL;
+	fabric->tf_ops.fabric_pre_unlink = NULL;
+	fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
+	fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
+	fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
+	fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
+	/*
+	 * Set up default attribute lists for the various fabric->tf_cit_tmpl
+	 * struct config_item_type's
+	 */
+	TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() for"
+				" LIO-Target failed!\n");
+		target_fabric_configfs_free(fabric);
+		return ret;
+	}
+
+	lio_target_fabric_configfs = fabric;
+	pr_debug("LIO_TARGET[0] - Set fabric ->"
+			" lio_target_fabric_configfs\n");
+	return 0;
+}
+
+
+void iscsi_target_deregister_configfs(void)
+{
+	if (!lio_target_fabric_configfs)
+		return;
+	/*
+	 * Shutdown discovery sessions and disable discovery TPG
+	 */
+	if (iscsit_global->discovery_tpg)
+		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+	target_fabric_configfs_deregister(lio_target_fabric_configfs);
+	lio_target_fabric_configfs = NULL;
+	pr_debug("LIO_TARGET[0] - Cleared"
+				" lio_target_fabric_configfs\n");
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 0000000..8cd5a63
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_CONFIGFS_H
+#define ISCSI_TARGET_CONFIGFS_H
+
+extern int iscsi_target_register_configfs(void);
+extern void iscsi_target_deregister_configfs(void);
+
+#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_core.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 0000000..2e46ea4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION			"v4.1.0-rc2"
+#define ISCSI_MAX_DATASN_MISSING_COUNT	16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT	2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT	2
+#define SECONDS_FOR_ASYNC_LOGOUT	10
+#define SECONDS_FOR_ASYNC_TEXT		10
+#define SECONDS_FOR_LOGOUT_COMP		15
+#define WHITE_SPACE			" \t\v\f\n\r"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT		3
+#define NA_DATAOUT_TIMEOUT_MAX		60
+#define NA_DATAOUT_TIMEOUT_MIX		2
+#define NA_DATAOUT_TIMEOUT_RETRIES	5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX	15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN	1
+#define NA_NOPIN_TIMEOUT		15
+#define NA_NOPIN_TIMEOUT_MAX		60
+#define NA_NOPIN_TIMEOUT_MIN		3
+#define NA_NOPIN_RESPONSE_TIMEOUT	30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX	60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN	3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS	0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS	0
+#define NA_RANDOM_R2T_OFFSETS		0
+#define NA_DEFAULT_ERL			0
+#define NA_DEFAULT_ERL_MAX		2
+#define NA_DEFAULT_ERL_MIN		0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION		1
+#define TA_LOGIN_TIMEOUT		15
+#define TA_LOGIN_TIMEOUT_MAX		30
+#define TA_LOGIN_TIMEOUT_MIN		5
+#define TA_NETIF_TIMEOUT		2
+#define TA_NETIF_TIMEOUT_MAX		15
+#define TA_NETIF_TIMEOUT_MIN		2
+#define TA_GENERATE_NODE_ACLS		0
+#define TA_DEFAULT_CMDSN_DEPTH		16
+#define TA_DEFAULT_CMDSN_DEPTH_MAX	512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN	1
+#define TA_CACHE_DYNAMIC_ACLS		0
+/* Enabled by default in demo mode (generate_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT	1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT	0
+#define TA_CACHE_CORE_NPS		0
+
+
+#define ISCSI_IOV_DATA_BUFFER		5
+
+enum tpg_np_network_transport_table {
+	ISCSI_TCP				= 0,
+	ISCSI_SCTP_TCP				= 1,
+	ISCSI_SCTP_UDP				= 2,
+	ISCSI_IWARP_TCP				= 3,
+	ISCSI_IWARP_SCTP			= 4,
+	ISCSI_INFINIBAND			= 5,
+};
+
+/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+	TARG_CONN_STATE_FREE			= 0x1,
+	TARG_CONN_STATE_XPT_UP			= 0x3,
+	TARG_CONN_STATE_IN_LOGIN		= 0x4,
+	TARG_CONN_STATE_LOGGED_IN		= 0x5,
+	TARG_CONN_STATE_IN_LOGOUT		= 0x6,
+	TARG_CONN_STATE_LOGOUT_REQUESTED	= 0x7,
+	TARG_CONN_STATE_CLEANUP_WAIT		= 0x8,
+};
+
+/* RFC-3720 7.3.2  Session State Diagram for a Target */
+enum target_sess_state_table {
+	TARG_SESS_STATE_FREE			= 0x1,
+	TARG_SESS_STATE_ACTIVE			= 0x2,
+	TARG_SESS_STATE_LOGGED_IN		= 0x3,
+	TARG_SESS_STATE_FAILED			= 0x4,
+	TARG_SESS_STATE_IN_CONTINUE		= 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+	ISCSI_RX_DATA	= 1,
+	ISCSI_TX_DATA	= 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+	DATAIN_COMPLETE_NORMAL			= 1,
+	DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+	DATAIN_COMPLETE_CONNECTION_RECOVERY	= 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+	DATAIN_WITHIN_COMMAND_RECOVERY		= 1,
+	DATAIN_CONNECTION_RECOVERY		= 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+	TPG_STATE_FREE				= 0,
+	TPG_STATE_ACTIVE			= 1,
+	TPG_STATE_INACTIVE			= 2,
+	TPG_STATE_COLD_RESET			= 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+	TIQN_STATE_ACTIVE			= 1,
+	TIQN_STATE_SHUTDOWN			= 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+	ICF_GOT_LAST_DATAOUT			= 0x00000001,
+	ICF_GOT_DATACK_SNACK			= 0x00000002,
+	ICF_NON_IMMEDIATE_UNSOLICITED_DATA	= 0x00000004,
+	ICF_SENT_LAST_R2T			= 0x00000008,
+	ICF_WITHIN_COMMAND_RECOVERY		= 0x00000010,
+	ICF_CONTIG_MEMORY			= 0x00000020,
+	ICF_ATTACHED_TO_RQUEUE			= 0x00000040,
+	ICF_OOO_CMDSN				= 0x00000080,
+	ICF_REJECT_FAIL_CONN			= 0x00000100,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+	ISTATE_NO_STATE			= 0,
+	ISTATE_NEW_CMD			= 1,
+	ISTATE_DEFERRED_CMD		= 2,
+	ISTATE_UNSOLICITED_DATA		= 3,
+	ISTATE_RECEIVE_DATAOUT		= 4,
+	ISTATE_RECEIVE_DATAOUT_RECOVERY	= 5,
+	ISTATE_RECEIVED_LAST_DATAOUT	= 6,
+	ISTATE_WITHIN_DATAOUT_RECOVERY	= 7,
+	ISTATE_IN_CONNECTION_RECOVERY	= 8,
+	ISTATE_RECEIVED_TASKMGT		= 9,
+	ISTATE_SEND_ASYNCMSG		= 10,
+	ISTATE_SENT_ASYNCMSG		= 11,
+	ISTATE_SEND_DATAIN		= 12,
+	ISTATE_SEND_LAST_DATAIN		= 13,
+	ISTATE_SENT_LAST_DATAIN		= 14,
+	ISTATE_SEND_LOGOUTRSP		= 15,
+	ISTATE_SENT_LOGOUTRSP		= 16,
+	ISTATE_SEND_NOPIN		= 17,
+	ISTATE_SENT_NOPIN		= 18,
+	ISTATE_SEND_REJECT		= 19,
+	ISTATE_SENT_REJECT		= 20,
+	ISTATE_SEND_R2T			= 21,
+	ISTATE_SENT_R2T			= 22,
+	ISTATE_SEND_R2T_RECOVERY	= 23,
+	ISTATE_SENT_R2T_RECOVERY	= 24,
+	ISTATE_SEND_LAST_R2T		= 25,
+	ISTATE_SENT_LAST_R2T		= 26,
+	ISTATE_SEND_LAST_R2T_RECOVERY	= 27,
+	ISTATE_SENT_LAST_R2T_RECOVERY	= 28,
+	ISTATE_SEND_STATUS		= 29,
+	ISTATE_SEND_STATUS_BROKEN_PC	= 30,
+	ISTATE_SENT_STATUS		= 31,
+	ISTATE_SEND_STATUS_RECOVERY	= 32,
+	ISTATE_SENT_STATUS_RECOVERY	= 33,
+	ISTATE_SEND_TASKMGTRSP		= 34,
+	ISTATE_SENT_TASKMGTRSP		= 35,
+	ISTATE_SEND_TEXTRSP		= 36,
+	ISTATE_SENT_TEXTRSP		= 37,
+	ISTATE_SEND_NOPIN_WANT_RESPONSE	= 38,
+	ISTATE_SENT_NOPIN_WANT_RESPONSE	= 39,
+	ISTATE_SEND_NOPIN_NO_RESPONSE	= 40,
+	ISTATE_REMOVE			= 41,
+	ISTATE_FREE			= 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+	CMDSN_ERROR_CANNOT_RECOVER	= -1,
+	CMDSN_NORMAL_OPERATION		= 0,
+	CMDSN_LOWER_THAN_EXP		= 1,
+	CMDSN_HIGHER_THAN_EXP		= 2,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+	IMMEDIATE_DATA_CANNOT_RECOVER	= -1,
+	IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+	IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+	DATAOUT_CANNOT_RECOVER		= -1,
+	DATAOUT_NORMAL			= 0,
+	DATAOUT_SEND_R2T		= 1,
+	DATAOUT_SEND_TO_TRANSPORT	= 2,
+	DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+	NAF_USERID_SET			= 0x01,
+	NAF_PASSWORD_SET		= 0x02,
+	NAF_USERID_IN_SET		= 0x04,
+	NAF_PASSWORD_IN_SET		= 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+	ISCSI_TF_RUNNING		= 0x01,
+	ISCSI_TF_STOP			= 0x02,
+	ISCSI_TF_EXPIRED		= 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+	NPF_IP_NETWORK		= 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+	ISCSI_NP_THREAD_ACTIVE		= 1,
+	ISCSI_NP_THREAD_INACTIVE	= 2,
+	ISCSI_NP_THREAD_RESET		= 3,
+	ISCSI_NP_THREAD_SHUTDOWN	= 4,
+	ISCSI_NP_THREAD_EXIT		= 5,
+};
+
+struct iscsi_conn_ops {
+	u8	HeaderDigest;			/* [0,1] == [None,CRC32C] */
+	u8	DataDigest;			/* [0,1] == [None,CRC32C] */
+	u32	MaxRecvDataSegmentLength;	/* [512..2**24-1] */
+	u8	OFMarker;			/* [0,1] == [No,Yes] */
+	u8	IFMarker;			/* [0,1] == [No,Yes] */
+	u32	OFMarkInt;			/* [1..65535] */
+	u32	IFMarkInt;			/* [1..65535] */
+};
+
+struct iscsi_sess_ops {
+	char	InitiatorName[224];
+	char	InitiatorAlias[256];
+	char	TargetName[224];
+	char	TargetAlias[256];
+	char	TargetAddress[256];
+	u16	TargetPortalGroupTag;		/* [0..65535] */
+	u16	MaxConnections;			/* [1..65535] */
+	u8	InitialR2T;			/* [0,1] == [No,Yes] */
+	u8	ImmediateData;			/* [0,1] == [No,Yes] */
+	u32	MaxBurstLength;			/* [512..2**24-1] */
+	u32	FirstBurstLength;		/* [512..2**24-1] */
+	u16	DefaultTime2Wait;		/* [0..3600] */
+	u16	DefaultTime2Retain;		/* [0..3600] */
+	u16	MaxOutstandingR2T;		/* [1..65535] */
+	u8	DataPDUInOrder;			/* [0,1] == [No,Yes] */
+	u8	DataSequenceInOrder;		/* [0,1] == [No,Yes] */
+	u8	ErrorRecoveryLevel;		/* [0..2] */
+	u8	SessionType;			/* [0,1] == [Normal,Discovery]*/
+};
+
+struct iscsi_queue_req {
+	int			state;
+	struct iscsi_cmd	*cmd;
+	struct list_head	qr_list;
+};
+
+struct iscsi_data_count {
+	int			data_length;
+	int			sync_and_steering;
+	enum data_count_type	type;
+	u32			iov_count;
+	u32			ss_iov_count;
+	u32			ss_marker_count;
+	struct kvec		*iov;
+};
+
+struct iscsi_param_list {
+	struct list_head	param_list;
+	struct list_head	extra_response_list;
+};
+
+struct iscsi_datain_req {
+	enum datain_req_comp_table dr_complete;
+	int			generate_recovery_values;
+	enum datain_req_rec_table recovery;
+	u32			begrun;
+	u32			runlength;
+	u32			data_length;
+	u32			data_offset;
+	u32			data_offset_end;
+	u32			data_sn;
+	u32			next_burst_len;
+	u32			read_data_done;
+	u32			seq_send_order;
+	struct list_head	dr_list;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+	u16			cid;
+	u32			batch_count;
+	u32			cmdsn;
+	u32			exp_cmdsn;
+	struct iscsi_cmd	*cmd;
+	struct list_head	ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+	u8			flags;
+	u32			data_sn;
+	u32			length;
+	u32			offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+	int			seq_complete;
+	int			recovery_r2t;
+	int			sent_r2t;
+	u32			r2t_sn;
+	u32			offset;
+	u32			targ_xfer_tag;
+	u32			xfer_len;
+	struct list_head	r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+	enum iscsi_timer_flags_table dataout_timer_flags;
+	/* DataOUT timeout retries */
+	u8			dataout_timeout_retries;
+	/* Within command recovery count */
+	u8			error_recovery_count;
+	/* iSCSI dependent state for out of order CmdSNs */
+	enum cmd_i_state_table	deferred_i_state;
+	/* iSCSI dependent state */
+	enum cmd_i_state_table	i_state;
+	/* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+	u8			immediate_cmd;
+	/* Immediate data present */
+	u8			immediate_data;
+	/* iSCSI Opcode */
+	u8			iscsi_opcode;
+	/* iSCSI Response Code */
+	u8			iscsi_response;
+	/* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+	u8			logout_reason;
+	/* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+	u8			logout_response;
+	/* MaxCmdSN has been incremented */
+	u8			maxcmdsn_inc;
+	/* Immediate Unsolicited Dataout */
+	u8			unsolicited_data;
+	/* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+	u16			logout_cid;
+	/* Command flags */
+	enum cmd_flags_table	cmd_flags;
+	/* Initiator Task Tag assigned from Initiator */
+	u32			init_task_tag;
+	/* Target Transfer Tag assigned from Target */
+	u32			targ_xfer_tag;
+	/* CmdSN assigned from Initiator */
+	u32			cmd_sn;
+	/* ExpStatSN assigned from Initiator */
+	u32			exp_stat_sn;
+	/* StatSN assigned to this ITT */
+	u32			stat_sn;
+	/* DataSN Counter */
+	u32			data_sn;
+	/* R2TSN Counter */
+	u32			r2t_sn;
+	/* Last DataSN acknowledged via DataAck SNACK */
+	u32			acked_data_sn;
+	/* Used for echoing NOPOUT ping data */
+	u32			buf_ptr_size;
+	/* Used to store DataDigest */
+	u32			data_crc;
+	/* Total size in bytes associated with command */
+	u32			data_length;
+	/* Counter for MaxOutstandingR2T */
+	u32			outstanding_r2ts;
+	/* Next R2T Offset when DataSequenceInOrder=Yes */
+	u32			r2t_offset;
+	/* Iovec current and orig count for iscsi_cmd->iov_data */
+	u32			iov_data_count;
+	u32			orig_iov_data_count;
+	/* Number of miscellaneous iovecs used for IP stack calls */
+	u32			iov_misc_count;
+	/* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+	u32			pdu_count;
+	/* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+	u32			pdu_send_order;
+	/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+	u32			pdu_start;
+	/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+	u32			seq_send_order;
+	/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+	u32			seq_count;
+	/* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+	u32			seq_no;
+	/* Lowest offset in current DataOUT sequence */
+	u32			seq_start_offset;
+	/* Highest offset in current DataOUT sequence */
+	u32			seq_end_offset;
+	/* Total size in bytes received so far of READ data */
+	u32			read_data_done;
+	/* Total size in bytes received so far of WRITE data */
+	u32			write_data_done;
+	/* Counter for FirstBurstLength key */
+	u32			first_burst_len;
+	/* Counter for MaxBurstLength key */
+	u32			next_burst_len;
+	/* Transfer size used for IP stack calls */
+	u32			tx_size;
+	/* Buffer used for various purposes */
+	void			*buf_ptr;
+	/* See include/linux/dma-mapping.h */
+	enum dma_data_direction	data_direction;
+	/* iSCSI PDU Header + CRC */
+	unsigned char		pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+	/* Number of times struct iscsi_cmd is present in immediate queue */
+	atomic_t		immed_queue_count;
+	atomic_t		response_queue_count;
+	spinlock_t		datain_lock;
+	spinlock_t		dataout_timeout_lock;
+	/* spinlock for protecting struct iscsi_cmd->i_state */
+	spinlock_t		istate_lock;
+	/* spinlock for adding within command recovery entries */
+	spinlock_t		error_lock;
+	/* spinlock for adding R2Ts */
+	spinlock_t		r2t_lock;
+	/* DataIN List */
+	struct list_head	datain_list;
+	/* R2T List */
+	struct list_head	cmd_r2t_list;
+	struct completion	reject_comp;
+	/* Timer for DataOUT */
+	struct timer_list	dataout_timer;
+	/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+	struct kvec		*iov_data;
+	/* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS			5
+	struct kvec		iov_misc[ISCSI_MISC_IOVECS];
+	/* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+	struct iscsi_pdu	*pdu_list;
+	/* Current struct iscsi_pdu used for DataPDUInOrder=No */
+	struct iscsi_pdu	*pdu_ptr;
+	/* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+	struct iscsi_seq	*seq_list;
+	/* Current struct iscsi_seq used for DataSequenceInOrder=No */
+	struct iscsi_seq	*seq_ptr;
+	/* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+	struct iscsi_tmr_req	*tmr_req;
+	/* Connection this command is allegiant to */
+	struct iscsi_conn	*conn;
+	/* Pointer to connection recovery entry */
+	struct iscsi_conn_recovery *cr;
+	/* Session the command is part of,  used for connection recovery */
+	struct iscsi_session	*sess;
+	/* list_head for connection list */
+	struct list_head	i_list;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd		se_cmd;
+	/* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
+	unsigned char		sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+	struct scatterlist	*t_mem_sg;
+	u32			t_mem_sg_nents;
+
+	u32			padding;
+	u8			pad_bytes[4];
+
+	struct scatterlist	*first_data_sg;
+	u32			first_data_sg_off;
+	u32			kmapped_nents;
+
+}  ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+	bool			task_reassign:1;
+	u32			ref_cmd_sn;
+	u32			exp_data_sn;
+	struct iscsi_conn_recovery *conn_recovery;
+	struct se_tmr_req	*se_tmr_req;
+};
+
+struct iscsi_conn {
+	wait_queue_head_t	queues_wq;
+	/* Authentication Successful for this connection */
+	u8			auth_complete;
+	/* State connection is currently in */
+	u8			conn_state;
+	u8			conn_logout_reason;
+	u8			network_transport;
+	enum iscsi_timer_flags_table nopin_timer_flags;
+	enum iscsi_timer_flags_table nopin_response_timer_flags;
+	u8			tx_immediate_queue;
+	u8			tx_response_queue;
+	/* Used to know what thread encountered a transport failure */
+	u8			which_thread;
+	/* connection id assigned by the Initiator */
+	u16			cid;
+	/* Remote TCP Port */
+	u16			login_port;
+	u16			local_port;
+	int			net_size;
+	u32			auth_id;
+	u32			conn_flags;
+	/* Used for iscsi_tx_login_rsp() */
+	u32			login_itt;
+	u32			exp_statsn;
+	/* Per connection status sequence number */
+	u32			stat_sn;
+	/* IFMarkInt's Current Value */
+	u32			if_marker;
+	/* OFMarkInt's Current Value */
+	u32			of_marker;
+	/* Used for calculating OFMarker offset to next PDU */
+	u32			of_marker_offset;
+	/* Complete Bad PDU for sending reject */
+	unsigned char		bad_hdr[ISCSI_HDR_LEN];
+#define IPV6_ADDRESS_SPACE				48
+	unsigned char		login_ip[IPV6_ADDRESS_SPACE];
+	unsigned char		local_ip[IPV6_ADDRESS_SPACE];
+	int			conn_usage_count;
+	int			conn_waiting_on_uc;
+	atomic_t		check_immediate_queue;
+	atomic_t		conn_logout_remove;
+	atomic_t		connection_exit;
+	atomic_t		connection_recovery;
+	atomic_t		connection_reinstatement;
+	atomic_t		connection_wait_rcfr;
+	atomic_t		sleep_on_conn_wait_comp;
+	atomic_t		transport_failed;
+	struct completion	conn_post_wait_comp;
+	struct completion	conn_wait_comp;
+	struct completion	conn_wait_rcfr_comp;
+	struct completion	conn_waiting_on_uc_comp;
+	struct completion	conn_logout_comp;
+	struct completion	tx_half_close_comp;
+	struct completion	rx_half_close_comp;
+	/* socket used by this connection */
+	struct socket		*sock;
+	struct timer_list	nopin_timer;
+	struct timer_list	nopin_response_timer;
+	struct timer_list	transport_timer;
+	/* Spinlock used for add/deleting cmd's from conn_cmd_list */
+	spinlock_t		cmd_lock;
+	spinlock_t		conn_usage_lock;
+	spinlock_t		immed_queue_lock;
+	spinlock_t		nopin_timer_lock;
+	spinlock_t		response_queue_lock;
+	spinlock_t		state_lock;
+	/* libcrypto RX and TX contexts for crc32c */
+	struct hash_desc	conn_rx_hash;
+	struct hash_desc	conn_tx_hash;
+	/* Used for scheduling TX and RX connection kthreads */
+	cpumask_var_t		conn_cpumask;
+	unsigned int		conn_rx_reset_cpumask:1;
+	unsigned int		conn_tx_reset_cpumask:1;
+	/* list_head of struct iscsi_cmd for this connection */
+	struct list_head	conn_cmd_list;
+	struct list_head	immed_queue_list;
+	struct list_head	response_queue_list;
+	struct iscsi_conn_ops	*conn_ops;
+	struct iscsi_param_list	*param_list;
+	/* Used for per connection auth state machine */
+	void			*auth_protocol;
+	struct iscsi_login_thread_s *login_thread;
+	struct iscsi_portal_group *tpg;
+	/* Pointer to parent session */
+	struct iscsi_session	*sess;
+	/* Pointer to thread_set in use for this conn's threads */
+	struct iscsi_thread_set	*thread_set;
+	/* list_head for session connection list */
+	struct list_head	conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+	u16			cid;
+	u32			cmd_count;
+	u32			maxrecvdatasegmentlength;
+	int			ready_for_reallegiance;
+	struct list_head	conn_recovery_cmd_list;
+	spinlock_t		conn_recovery_cmd_lock;
+	struct timer_list	time2retain_timer;
+	struct iscsi_session	*sess;
+	struct list_head	cr_list;
+}  ____cacheline_aligned;
+
+struct iscsi_session {
+	u8			initiator_vendor;
+	u8			isid[6];
+	enum iscsi_timer_flags_table time2retain_timer_flags;
+	u8			version_active;
+	u16			cid_called;
+	u16			conn_recovery_count;
+	u16			tsih;
+	/* state session is currently in */
+	u32			session_state;
+	/* session wide counter: initiator assigned task tag */
+	u32			init_task_tag;
+	/* session wide counter: target assigned task tag */
+	u32			targ_xfer_tag;
+	u32			cmdsn_window;
+
+	/* protects cmdsn values */
+	struct mutex		cmdsn_mutex;
+	/* session wide counter: expected command sequence number */
+	u32			exp_cmd_sn;
+	/* session wide counter: maximum allowed command sequence number */
+	u32			max_cmd_sn;
+	struct list_head	sess_ooo_cmdsn_list;
+
+	/* LIO specific session ID */
+	u32			sid;
+	char			auth_type[8];
+	/* unique within the target */
+	int			session_index;
+	/* Used for session reference counting */
+	int			session_usage_count;
+	int			session_waiting_on_uc;
+	u32			cmd_pdus;
+	u32			rsp_pdus;
+	u64			tx_data_octets;
+	u64			rx_data_octets;
+	u32			conn_digest_errors;
+	u32			conn_timeout_errors;
+	u64			creation_time;
+	spinlock_t		session_stats_lock;
+	/* Number of active connections */
+	atomic_t		nconn;
+	atomic_t		session_continuation;
+	atomic_t		session_fall_back_to_erl0;
+	atomic_t		session_logout;
+	atomic_t		session_reinstatement;
+	atomic_t		session_stop_active;
+	atomic_t		sleep_on_sess_wait_comp;
+	/* connection list */
+	struct list_head	sess_conn_list;
+	struct list_head	cr_active_list;
+	struct list_head	cr_inactive_list;
+	spinlock_t		conn_lock;
+	spinlock_t		cr_a_lock;
+	spinlock_t		cr_i_lock;
+	spinlock_t		session_usage_lock;
+	spinlock_t		ttt_lock;
+	struct completion	async_msg_comp;
+	struct completion	reinstatement_comp;
+	struct completion	session_wait_comp;
+	struct completion	session_waiting_on_uc_comp;
+	struct timer_list	time2retain_timer;
+	struct iscsi_sess_ops	*sess_ops;
+	struct se_session	*se_sess;
+	struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+	u8 auth_complete;
+	u8 checked_for_existing;
+	u8 current_stage;
+	u8 leading_connection;
+	u8 first_request;
+	u8 version_min;
+	u8 version_max;
+	char isid[6];
+	u32 cmd_sn;
+	u32 init_task_tag;
+	u32 initial_exp_statsn;
+	u32 rsp_length;
+	u16 cid;
+	u16 tsih;
+	char *req;
+	char *rsp;
+	char *req_buf;
+	char *rsp_buf;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+	u32			dataout_timeout;
+	u32			dataout_timeout_retries;
+	u32			default_erl;
+	u32			nopin_timeout;
+	u32			nopin_response_timeout;
+	u32			random_datain_pdu_offsets;
+	u32			random_datain_seq_offsets;
+	u32			random_r2t_offsets;
+	u32			tmr_cold_reset;
+	u32			tmr_warm_reset;
+	struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+	enum naf_flags_table	naf_flags;
+	int			authenticate_target;
+	/* Used for iscsit_global->discovery_auth,
+	 * set to zero (auth disabled) by default */
+	int			enforce_discovery_auth;
+#define MAX_USER_LEN				256
+#define MAX_PASS_LEN				256
+	char			userid[MAX_USER_LEN];
+	char			password[MAX_PASS_LEN];
+	char			userid_mutual[MAX_USER_LEN];
+	char			password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+	struct config_group	iscsi_sess_stats_group;
+	struct config_group	iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+	struct iscsi_node_attrib node_attrib;
+	struct iscsi_node_auth	node_auth;
+	struct iscsi_node_stat_grps node_stat_grps;
+	struct se_node_acl	se_node_acl;
+};
+
+#define NODE_STAT_GRPS(nacl)	(&(nacl)->node_stat_grps)
+
+#define ISCSI_NODE_ATTRIB(t)	(&(t)->node_attrib)
+#define ISCSI_NODE_AUTH(t)	(&(t)->node_auth)
+
+struct iscsi_tpg_attrib {
+	u32			authentication;
+	u32			login_timeout;
+	u32			netif_timeout;
+	u32			generate_node_acls;
+	u32			cache_dynamic_acls;
+	u32			default_cmdsn_depth;
+	u32			demo_mode_write_protect;
+	u32			prod_mode_write_protect;
+	struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+	int			np_network_transport;
+	int			np_ip_proto;
+	int			np_sock_type;
+	enum np_thread_state_table np_thread_state;
+	enum iscsi_timer_flags_table np_login_timer_flags;
+	u32			np_exports;
+	enum np_flags_table	np_flags;
+	unsigned char		np_ip[IPV6_ADDRESS_SPACE];
+	u16			np_port;
+	spinlock_t		np_thread_lock;
+	struct completion	np_restart_comp;
+	struct socket		*np_socket;
+	struct __kernel_sockaddr_storage np_sockaddr;
+	struct task_struct	*np_thread;
+	struct timer_list	np_login_timer;
+	struct iscsi_portal_group *np_login_tpg;
+	struct list_head	np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+	struct iscsi_np		*tpg_np;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np	*tpg_np_parent;
+	struct list_head	tpg_np_list;
+	struct list_head	tpg_np_child_list;
+	struct list_head	tpg_np_parent_list;
+	struct se_tpg_np	se_tpg_np;
+	spinlock_t		tpg_np_parent_lock;
+};
+
+struct iscsi_portal_group {
+	unsigned char		tpg_chap_id;
+	/* TPG State */
+	enum tpg_state_table	tpg_state;
+	/* Target Portal Group Tag */
+	u16			tpgt;
+	/* Id assigned to target sessions */
+	u16			ntsih;
+	/* Number of active sessions */
+	u32			nsessions;
+	/* Number of Network Portals available for this TPG */
+	u32			num_tpg_nps;
+	/* Per TPG LIO specific session ID. */
+	u32			sid;
+	/* Spinlock for adding/removing Network Portals */
+	spinlock_t		tpg_np_lock;
+	spinlock_t		tpg_state_lock;
+	struct se_portal_group tpg_se_tpg;
+	struct mutex		tpg_access_lock;
+	struct mutex		np_login_lock;
+	struct iscsi_tpg_attrib	tpg_attrib;
+	/* Pointer to default list of iSCSI parameters for TPG */
+	struct iscsi_param_list	*param_list;
+	struct iscsi_tiqn	*tpg_tiqn;
+	struct list_head	tpg_gnp_list;
+	struct list_head	tpg_list;
+} ____cacheline_aligned;
+
+#define ISCSI_TPG_C(c)		((struct iscsi_portal_group *)(c)->tpg)
+#define ISCSI_TPG_LUN(c, l)  ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
+#define ISCSI_TPG_S(s)		((struct iscsi_portal_group *)(s)->tpg)
+#define ISCSI_TPG_ATTRIB(t)	(&(t)->tpg_attrib)
+#define SE_TPG(tpg)		(&(tpg)->tpg_se_tpg)
+
+struct iscsi_wwn_stat_grps {
+	struct config_group	iscsi_stat_group;
+	struct config_group	iscsi_instance_group;
+	struct config_group	iscsi_sess_err_group;
+	struct config_group	iscsi_tgt_attr_group;
+	struct config_group	iscsi_login_stats_group;
+	struct config_group	iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN				224
+	unsigned char		tiqn[ISCSI_IQN_LEN];
+	enum tiqn_state_table	tiqn_state;
+	int			tiqn_access_count;
+	u32			tiqn_active_tpgs;
+	u32			tiqn_ntpgs;
+	u32			tiqn_num_tpg_nps;
+	u32			tiqn_nsessions;
+	struct list_head	tiqn_list;
+	struct list_head	tiqn_tpg_list;
+	spinlock_t		tiqn_state_lock;
+	spinlock_t		tiqn_tpg_lock;
+	struct se_wwn		tiqn_wwn;
+	struct iscsi_wwn_stat_grps tiqn_stat_grps;
+	int			tiqn_index;
+	struct iscsi_sess_err_stats  sess_err_stats;
+	struct iscsi_login_stats     login_stats;
+	struct iscsi_logout_stats    logout_stats;
+} ____cacheline_aligned;
+
+#define WWN_STAT_GRPS(tiqn)	(&(tiqn)->tiqn_stat_grps)
+
+struct iscsit_global {
+	/* In core shutdown */
+	u32			in_shutdown;
+	u32			active_ts;
+	/* Unique identifier used for the authentication daemon */
+	u32			auth_id;
+	u32			inactive_ts;
+	/* Thread Set bitmap count */
+	int			ts_bitmap_count;
+	/* Thread Set bitmap pointer */
+	unsigned long		*ts_bitmap;
+	/* Used for iSCSI discovery session authentication */
+	struct iscsi_node_acl	discovery_acl;
+	struct iscsi_portal_group	*discovery_tpg;
+};
+
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 0000000..8c04951
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target DataIN value generation functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_datain_values.h"
+
+struct iscsi_datain_req *iscsit_allocate_datain_req(void)
+{
+	struct iscsi_datain_req *dr;
+
+	dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
+	if (!dr) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_datain_req\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&dr->dr_list);
+
+	return dr;
+}
+
+void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+	spin_lock(&cmd->datain_lock);
+	list_add_tail(&dr->dr_list, &cmd->datain_list);
+	spin_unlock(&cmd->datain_lock);
+}
+
+void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+	spin_lock(&cmd->datain_lock);
+	list_del(&dr->dr_list);
+	spin_unlock(&cmd->datain_lock);
+
+	kmem_cache_free(lio_dr_cache, dr);
+}
+
+void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+{
+	struct iscsi_datain_req *dr, *dr_tmp;
+
+	spin_lock(&cmd->datain_lock);
+	list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
+		list_del(&dr->dr_list);
+		kmem_cache_free(lio_dr_cache, dr);
+	}
+	spin_unlock(&cmd->datain_lock);
+}
+
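+/*
+ *	Return the first struct iscsi_datain_req queued on the command's
+ *	datain_list, or NULL if the list is empty.
+ */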
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+{
+	struct iscsi_datain_req *dr;
+
+	if (list_empty(&cmd->datain_list)) {
+		pr_err("cmd->datain_list is empty for ITT:"
+			" 0x%08x\n", cmd->init_task_tag);
+		return NULL;
+	}
+	list_for_each_entry(dr, &cmd->datain_list, dr_list)
+		break;
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 next_burst_len, read_data_done, read_data_left;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	next_burst_len = (!dr->recovery) ?
+			cmd->next_burst_len : dr->next_burst_len;
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+
+	read_data_left = (cmd->data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
+	    (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
+	     next_burst_len))) {
+		datain->length = read_data_left;
+
+		datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			datain->flags |= ISCSI_FLAG_DATA_ACK;
+	} else {
+		if ((next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			datain->length =
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			next_burst_len += datain->length;
+		} else {
+			datain->length = (conn->sess->sess_ops->MaxBurstLength -
+					  next_burst_len);
+			next_burst_len = 0;
+
+			datain->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				datain->flags |= ISCSI_FLAG_DATA_ACK;
+		}
+	}
+
+	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	datain->offset = read_data_done;
+
+	if (!dr->recovery) {
+		cmd->next_burst_len = next_burst_len;
+		cmd->read_data_done += datain->length;
+	} else {
+		dr->next_burst_len = next_burst_len;
+		dr->read_data_done += datain->length;
+	}
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 offset, read_data_done, read_data_left, seq_send_order;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_seq *seq;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+	seq_send_order = (!dr->recovery) ?
+			cmd->seq_send_order : dr->seq_send_order;
+
+	read_data_left = (cmd->data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+	if (!seq)
+		return NULL;
+
+	seq->sent = 1;
+
+	if (!dr->recovery && !seq->next_burst_len)
+		seq->first_datasn = cmd->data_sn;
+
+	offset = (seq->offset + seq->next_burst_len);
+
+	if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+	     cmd->data_length) {
+		datain->length = (cmd->data_length - offset);
+		datain->offset = offset;
+
+		datain->flags |= ISCSI_FLAG_CMD_FINAL;
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+		seq->next_burst_len = 0;
+		seq_send_order++;
+	} else {
+		if ((seq->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			datain->length =
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			datain->offset = (seq->offset + seq->next_burst_len);
+
+			seq->next_burst_len += datain->length;
+		} else {
+			datain->length = (conn->sess->sess_ops->MaxBurstLength -
+					  seq->next_burst_len);
+			datain->offset = (seq->offset + seq->next_burst_len);
+
+			datain->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+			seq->next_burst_len = 0;
+			seq_send_order++;
+		}
+	}
+
+	if ((read_data_done + datain->length) == cmd->data_length)
+		datain->flags |= ISCSI_FLAG_DATA_STATUS;
+
+	datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->seq_send_order = seq_send_order;
+		cmd->read_data_done += datain->length;
+	} else {
+		dr->seq_send_order = seq_send_order;
+		dr->read_data_done += datain->length;
+	}
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+			seq->last_datasn = datain->data_sn;
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 next_burst_len, read_data_done, read_data_left;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_pdu *pdu;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	next_burst_len = (!dr->recovery) ?
+			cmd->next_burst_len : dr->next_burst_len;
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+
+	read_data_left = (cmd->data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return dr;
+	}
+
+	pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
+	if (!pdu)
+		return dr;
+
+	if ((read_data_done + pdu->length) == cmd->data_length) {
+		pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+		next_burst_len = 0;
+	} else {
+		if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength)
+			next_burst_len += pdu->length;
+		else {
+			pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+			if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+				pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+			next_burst_len = 0;
+		}
+	}
+
+	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->next_burst_len = next_burst_len;
+		cmd->read_data_done += pdu->length;
+	} else {
+		dr->next_burst_len = next_burst_len;
+		dr->read_data_done += pdu->length;
+	}
+
+	datain->flags = pdu->flags;
+	datain->length = pdu->length;
+	datain->offset = pdu->offset;
+	datain->data_sn = pdu->data_sn;
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
+/*
+ *	For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	u32 read_data_done, read_data_left, seq_send_order;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct iscsi_pdu *pdu;
+	struct iscsi_seq *seq = NULL;
+
+	dr = iscsit_get_datain_req(cmd);
+	if (!dr)
+		return NULL;
+
+	if (dr->recovery && dr->generate_recovery_values) {
+		if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+					cmd, dr) < 0)
+			return NULL;
+
+		dr->generate_recovery_values = 0;
+	}
+
+	read_data_done = (!dr->recovery) ?
+			cmd->read_data_done : dr->read_data_done;
+	seq_send_order = (!dr->recovery) ?
+			cmd->seq_send_order : dr->seq_send_order;
+
+	read_data_left = (cmd->data_length - read_data_done);
+	if (!read_data_left) {
+		pr_err("ITT: 0x%08x read_data_left is zero!\n",
+				cmd->init_task_tag);
+		return NULL;
+	}
+
+	seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+	if (!seq)
+		return NULL;
+
+	seq->sent = 1;
+
+	if (!dr->recovery && !seq->next_burst_len)
+		seq->first_datasn = cmd->data_sn;
+
+	pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
+	if (!pdu)
+		return NULL;
+
+	if (seq->pdu_send_order == seq->pdu_count) {
+		pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+		if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+			pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+		seq->next_burst_len = 0;
+		seq_send_order++;
+	} else
+		seq->next_burst_len += pdu->length;
+
+	if ((read_data_done + pdu->length) == cmd->data_length)
+		pdu->flags |= ISCSI_FLAG_DATA_STATUS;
+
+	pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+	if (!dr->recovery) {
+		cmd->seq_send_order = seq_send_order;
+		cmd->read_data_done += pdu->length;
+	} else {
+		dr->seq_send_order = seq_send_order;
+		dr->read_data_done += pdu->length;
+	}
+
+	datain->flags = pdu->flags;
+	datain->length = pdu->length;
+	datain->offset = pdu->offset;
+	datain->data_sn = pdu->data_sn;
+
+	if (!dr->recovery) {
+		if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+			seq->last_datasn = datain->data_sn;
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+			dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+		return dr;
+	}
+
+	if (!dr->runlength) {
+		if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	} else {
+		if ((dr->begrun + dr->runlength) == dr->data_sn) {
+			dr->dr_complete =
+			    (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+				DATAIN_COMPLETE_CONNECTION_RECOVERY;
+		}
+	}
+
+	return dr;
+}
+
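+/*
+ *	Dispatch to the DataIN value generator matching the negotiated
+ *	DataSequenceInOrder and DataPDUInOrder session parameters.
+ */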
+struct iscsi_datain_req *iscsit_get_datain_values(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain *datain)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder &&
+	    conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_yes_and_yes(cmd, datain);
+	else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+		  conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_no_and_yes(cmd, datain);
+	else if (conn->sess->sess_ops->DataSequenceInOrder &&
+		 !conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_yes_and_no(cmd, datain);
+	else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+		   !conn->sess->sess_ops->DataPDUInOrder)
+		return iscsit_set_datain_values_no_and_no(cmd, datain);
+
+	return NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 0000000..646429a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_DATAIN_VALUES_H
+#define ISCSI_TARGET_DATAIN_VALUES_H
+
+extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
+extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+			struct iscsi_datain *);
+
+#endif   /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 0000000..bcc4098
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,67 @@
+/*******************************************************************************
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/scsi_device.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+
+void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+{
+	struct se_node_acl *se_nacl;
+
+	/*
+	 * This is a discovery session; the single queue slot was already
+	 * assigned in iscsi_login_zero_tsih().  Since only Logout and
+	 * Text Opcodes are allowed during discovery, we do not have to worry
+	 * about the HBA's queue depth here.
+	 */
+	if (sess->sess_ops->SessionType)
+		return;
+
+	se_nacl = sess->se_sess->se_node_acl;
+
+	/*
+	 * This is a normal session; set the Session's CmdSN window to the
+	 * struct se_node_acl->queue_depth.  The value in struct se_node_acl->queue_depth
+	 * has already been validated as a legal value in
+	 * core_set_queue_depth_for_node().
+	 */
+	sess->cmdsn_window = se_nacl->queue_depth;
+	sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+}
+
+void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+{
+	if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
+		return;
+
+	cmd->maxcmdsn_inc = 1;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	sess->max_cmd_sn += 1;
+	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+	mutex_unlock(&sess->cmdsn_mutex);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 0000000..a0e2df9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_DEVICE_H
+#define ISCSI_TARGET_DEVICE_H
+
+extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+
+#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 0000000..a7c4324
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
+/******************************************************************************
+ * This file contains error recovery level zero functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+/*
+ *	Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ *	checks against to determine whether a PDU's Offset+Length is within
+ *	the current DataOUT Sequence.  Used for DataSequenceInOrder=Yes only.
+ */
+void iscsit_set_dataout_sequence_values(
+	struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	/*
+	 * Still set seq_start_offset and seq_end_offset for Unsolicited
+	 * DataOUT, even if DataSequenceInOrder=No.
+	 */
+	if (cmd->unsolicited_data) {
+		cmd->seq_start_offset = cmd->write_data_done;
+		cmd->seq_end_offset = (cmd->write_data_done +
+			(cmd->data_length >
+			 conn->sess->sess_ops->FirstBurstLength) ?
+			conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
+		return;
+	}
+
+	if (!conn->sess->sess_ops->DataSequenceInOrder)
+		return;
+
+	if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
+		cmd->seq_start_offset = cmd->write_data_done;
+		cmd->seq_end_offset = (cmd->data_length >
+			conn->sess->sess_ops->MaxBurstLength) ?
+			(cmd->write_data_done +
+			conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+	} else {
+		cmd->seq_start_offset = cmd->seq_end_offset;
+		cmd->seq_end_offset = ((cmd->seq_end_offset +
+			conn->sess->sess_ops->MaxBurstLength) >=
+			cmd->data_length) ? cmd->data_length :
+			(cmd->seq_end_offset +
+			 conn->sess->sess_ops->MaxBurstLength);
+	}
+}
+
+static int iscsit_dataout_within_command_recovery_check(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * We do the within-command recovery checks here as it is
+	 * the first function called in iscsit_check_pre_dataout().
+	 * Basically, if we are in within-command recovery and
+	 * the PDU does not contain the offset the sequence needs,
+	 * dump the payload.
+	 *
+	 * This only applies to DataPDUInOrder=Yes, for
+	 * DataPDUInOrder=No we only re-request the failed PDU
+	 * and check that all PDUs in a sequence are received
+	 * upon end of sequence.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
+		    (cmd->write_data_done != hdr->offset))
+			goto dump;
+
+		cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
+	} else {
+		struct iscsi_seq *seq;
+
+		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+		if (!seq)
+			return DATAOUT_CANNOT_RECOVER;
+		/*
+		 * Set the struct iscsi_seq pointer to reuse later.
+		 */
+		cmd->seq_ptr = seq;
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			if ((seq->status ==
+			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+			   ((seq->offset != hdr->offset) ||
+			    (seq->data_sn != hdr->datasn)))
+				goto dump;
+		} else {
+			if ((seq->status ==
+			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+			    (seq->data_sn != hdr->datasn))
+				goto dump;
+		}
+
+		if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
+			goto dump;
+
+		if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
+			seq->status = 0;
+	}
+
+	return DATAOUT_NORMAL;
+
+dump:
+	pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
+		" 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
+	return iscsit_dump_data_payload(conn, payload_length, 1);
+}
+
+static int iscsit_dataout_check_unsolicited_sequence(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	u32 first_burst_len;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if ((hdr->offset < cmd->seq_start_offset) ||
+	   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+		pr_err("Command ITT: 0x%08x with Offset: %u,"
+		" Length: %u outside of Unsolicited Sequence %u:%u while"
+		" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+		hdr->offset, payload_length, cmd->seq_start_offset,
+			cmd->seq_end_offset);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	first_burst_len = (cmd->first_burst_len + payload_length);
+
+	if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+			" for this Unsolicited DataOut Burst.\n",
+			first_burst_len, conn->sess->sess_ops->FirstBurstLength);
+		transport_send_check_condition_and_sense(&cmd->se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	/*
+	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+	 * checks for the current Unsolicited DataOUT Sequence.
+	 */
+	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+		/*
+		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
+		 * sequence checks are handled in
+		 * iscsit_dataout_datapduinorder_no_fbit().
+		 */
+		if (!conn->sess->sess_ops->DataPDUInOrder)
+			goto out;
+
+		if ((first_burst_len != cmd->data_length) &&
+		    (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+			pr_err("Unsolicited non-immediate data"
+			" received %u does not equal FirstBurstLength: %u, and"
+			" does not equal ExpXferLen %u.\n", first_burst_len,
+				conn->sess->sess_ops->FirstBurstLength,
+				cmd->data_length);
+			transport_send_check_condition_and_sense(&cmd->se_cmd,
+					TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+	} else {
+		if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+			pr_err("Command ITT: 0x%08x reached"
+			" FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+				" error.\n", cmd->init_task_tag,
+				conn->sess->sess_ops->FirstBurstLength);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+		if (first_burst_len == cmd->data_length) {
+			pr_err("Command ITT: 0x%08x reached"
+			" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+			" error.\n", cmd->init_task_tag, cmd->data_length);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+	}
+
+out:
+	return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_sequence(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	u32 next_burst_len;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *seq = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+	 * is within range as defined by iscsit_set_dataout_sequence_values().
+	 *
+	 * For DataSequenceInOrder=No: Check that a struct iscsi_seq exists for
+	 * the offset+length tuple.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		/*
+		 * Due to the possibility of recovery DataOUT sent by the
+		 * initiator fulfilling a Recovery R2T, it's best to just dump
+		 * the payload here, instead of erroring out.
+		 */
+		if ((hdr->offset < cmd->seq_start_offset) ||
+		   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+			pr_err("Command ITT: 0x%08x with Offset: %u,"
+			" Length: %u outside of Sequence %u:%u while"
+			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+			hdr->offset, payload_length, cmd->seq_start_offset,
+				cmd->seq_end_offset);
+
+			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			return DATAOUT_WITHIN_COMMAND_RECOVERY;
+		}
+
+		next_burst_len = (cmd->next_burst_len + payload_length);
+	} else {
+		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+		if (!seq)
+			return DATAOUT_CANNOT_RECOVER;
+		/*
+		 * Set the struct iscsi_seq pointer to reuse later.
+		 */
+		cmd->seq_ptr = seq;
+
+		if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			return DATAOUT_WITHIN_COMMAND_RECOVERY;
+		}
+
+		next_burst_len = (seq->next_burst_len + payload_length);
+	}
+
+	if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+		pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+			" Length: %u exceeds MaxBurstLength: %u. protocol"
+			" error.\n", cmd->init_task_tag,
+			(next_burst_len - payload_length),
+			payload_length, conn->sess->sess_ops->MaxBurstLength);
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	/*
+	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+	 * checks for the current DataOUT Sequence.
+	 */
+	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+		/*
+		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
+		 * sequence checks are handled in
+		 * iscsit_dataout_datapduinorder_no_fbit().
+		 */
+		if (!conn->sess->sess_ops->DataPDUInOrder)
+			goto out;
+
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if ((next_burst_len <
+			     conn->sess->sess_ops->MaxBurstLength) &&
+			   ((cmd->write_data_done + payload_length) <
+			     cmd->data_length)) {
+				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+				" before end of DataOUT sequence, protocol"
+				" error.\n", cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		} else {
+			if (next_burst_len < seq->xfer_len) {
+				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+				" before end of DataOUT sequence, protocol"
+				" error.\n", cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		}
+	} else {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if (next_burst_len ==
+					conn->sess->sess_ops->MaxBurstLength) {
+				pr_err("Command ITT: 0x%08x reached"
+				" MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+				" not set, protocol error.", cmd->init_task_tag,
+					conn->sess->sess_ops->MaxBurstLength);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+			if ((cmd->write_data_done + payload_length) ==
+					cmd->data_length) {
+				pr_err("Command ITT: 0x%08x reached"
+				" last DataOUT PDU in sequence but ISCSI_FLAG_"
+				"CMD_FINAL is not set, protocol error.\n",
+					cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		} else {
+			if (next_burst_len == seq->xfer_len) {
+				pr_err("Command ITT: 0x%08x reached"
+				" last DataOUT PDU in sequence but ISCSI_FLAG_"
+				"CMD_FINAL is not set, protocol error.\n",
+					cmd->init_task_tag);
+				return DATAOUT_CANNOT_RECOVER;
+			}
+		}
+	}
+
+out:
+	return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int dump = 0, recovery = 0;
+	u32 data_sn = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * Considering the target has no method of re-requesting DataOUT
+	 * by DataSN, if we receive a greater DataSN than expected we
+	 * assume the functions for DataPDUInOrder=[Yes,No] below will
+	 * handle it.
+	 *
+	 * If the DataSN is less than expected, dump the payload.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder)
+		data_sn = cmd->data_sn;
+	else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+		data_sn = seq->data_sn;
+	}
+
+	if (hdr->datasn > data_sn) {
+		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+			" higher than expected 0x%08x.\n", cmd->init_task_tag,
+				hdr->datasn, data_sn);
+		recovery = 1;
+		goto recover;
+	} else if (hdr->datasn < data_sn) {
+		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+			" lower than expected 0x%08x, discarding payload.\n",
+			cmd->init_task_tag, hdr->datasn, data_sn);
+		dump = 1;
+		goto dump;
+	}
+
+	return DATAOUT_NORMAL;
+
+recover:
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Unable to perform within-command recovery"
+				" while ERL=0.\n");
+		return DATAOUT_CANNOT_RECOVER;
+	}
+dump:
+	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
+				DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int dump = 0, recovery = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	/*
+	 * For DataSequenceInOrder=Yes: If the offset is greater than the global
+	 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol
+	 * error has occurred; fail the connection.
+	 *
+	 * For DataSequenceInOrder=No: If the offset is greater than the per
+	 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a
+	 * protocol error has occurred; fail the connection.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if (hdr->offset != cmd->write_data_done) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u different than expected %u.\n", cmd->init_task_tag,
+				hdr->offset, cmd->write_data_done);
+			recovery = 1;
+			goto recover;
+		}
+	} else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+
+		if (hdr->offset > seq->offset) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u greater than expected %u.\n", cmd->init_task_tag,
+				hdr->offset, seq->offset);
+			recovery = 1;
+			goto recover;
+		} else if (hdr->offset < seq->offset) {
+			pr_err("Command ITT: 0x%08x, received offset"
+			" %u less than expected %u, discarding payload.\n",
+				cmd->init_task_tag, hdr->offset, seq->offset);
+			dump = 1;
+			goto dump;
+		}
+	}
+
+	return DATAOUT_NORMAL;
+
+recover:
+	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+		pr_err("Unable to perform within-command recovery"
+				" while ERL=0.\n");
+		return DATAOUT_CANNOT_RECOVER;
+	}
+dump:
+	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+		hdr->offset, payload_length) :
+	       (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
+}
+
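+/*
+ *	For DataPDUInOrder=No, locate the PDU holder for this offset/length
+ *	and dump the payload if that PDU has already been received.
+ */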
+static int iscsit_dataout_pre_datapduinorder_no(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_pdu *pdu;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
+	if (!pdu)
+		return DATAOUT_CANNOT_RECOVER;
+
+	cmd->pdu_ptr = pdu;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_CRC_FAILED:
+	case ISCSI_PDU_TIMED_OUT:
+		break;
+	case ISCSI_PDU_RECEIVED_OK:
+		pr_err("Command ITT: 0x%08x received already gotten"
+			" Offset: %u, Length: %u\n", cmd->init_task_tag,
+				hdr->offset, payload_length);
+		return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	return DATAOUT_NORMAL;
+}
+
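+/*
+ *	Mark the R2T covering this offset/length range as complete and
+ *	decrement the outstanding R2T count; unsolicited data carries no R2T.
+ */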
+static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+{
+	struct iscsi_r2t *r2t;
+
+	if (cmd->unsolicited_data)
+		return 0;
+
+	r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
+	if (!r2t)
+		return -1;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	r2t->seq_complete = 1;
+	cmd->outstanding_r2ts--;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
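+/*
+ *	For DataPDUInOrder=No, mark the PDU holder for this DataSN as
+ *	received and, if the F bit is set, run the end-of-sequence checks.
+ */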
+static int iscsit_dataout_update_datapduinorder_no(
+	struct iscsi_cmd *cmd,
+	u32 data_sn,
+	int f_bit)
+{
+	int ret = 0;
+	struct iscsi_pdu *pdu = cmd->pdu_ptr;
+
+	pdu->data_sn = data_sn;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+		pdu->status = ISCSI_PDU_RECEIVED_OK;
+		break;
+	case ISCSI_PDU_CRC_FAILED:
+		pdu->status = ISCSI_PDU_RECEIVED_OK;
+		break;
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_RECEIVED_OK;
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+	if (f_bit) {
+		ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
+		if (ret == DATAOUT_CANNOT_RECOVER)
+			return ret;
+	}
+
+	return DATAOUT_NORMAL;
+}
+
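+/*
+ *	Update burst counters for a DataOUT PDU that passed CRC and decide
+ *	whether to send another R2T, hand the command to the transport, or
+ *	continue receiving DataOUT.
+ */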
+static int iscsit_dataout_post_crc_passed(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int ret, send_r2t = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *seq = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (cmd->unsolicited_data) {
+		if ((cmd->first_burst_len + payload_length) ==
+		     conn->sess->sess_ops->FirstBurstLength) {
+			if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+					payload_length) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+			send_r2t = 1;
+		}
+
+		if (!conn->sess->sess_ops->DataPDUInOrder) {
+			ret = iscsit_dataout_update_datapduinorder_no(cmd,
+				hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+			if (ret == DATAOUT_CANNOT_RECOVER)
+				return ret;
+		}
+
+		cmd->first_burst_len += payload_length;
+
+		if (conn->sess->sess_ops->DataSequenceInOrder)
+			cmd->data_sn++;
+		else {
+			seq = cmd->seq_ptr;
+			seq->data_sn++;
+			seq->offset += payload_length;
+		}
+
+		if (send_r2t) {
+			if (seq)
+				seq->status = DATAOUT_SEQUENCE_COMPLETE;
+			cmd->first_burst_len = 0;
+			cmd->unsolicited_data = 0;
+		}
+	} else {
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if ((cmd->next_burst_len + payload_length) ==
+			     conn->sess->sess_ops->MaxBurstLength) {
+				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+						payload_length) < 0)
+					return DATAOUT_CANNOT_RECOVER;
+				send_r2t = 1;
+			}
+
+			if (!conn->sess->sess_ops->DataPDUInOrder) {
+				ret = iscsit_dataout_update_datapduinorder_no(
+						cmd, hdr->datasn,
+						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+				if (ret == DATAOUT_CANNOT_RECOVER)
+					return ret;
+			}
+
+			cmd->next_burst_len += payload_length;
+			cmd->data_sn++;
+
+			if (send_r2t)
+				cmd->next_burst_len = 0;
+		} else {
+			seq = cmd->seq_ptr;
+
+			if ((seq->next_burst_len + payload_length) ==
+			     seq->xfer_len) {
+				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+						payload_length) < 0)
+					return DATAOUT_CANNOT_RECOVER;
+				send_r2t = 1;
+			}
+
+			if (!conn->sess->sess_ops->DataPDUInOrder) {
+				ret = iscsit_dataout_update_datapduinorder_no(
+						cmd, hdr->datasn,
+						(hdr->flags & ISCSI_FLAG_CMD_FINAL));
+				if (ret == DATAOUT_CANNOT_RECOVER)
+					return ret;
+			}
+
+			seq->data_sn++;
+			seq->offset += payload_length;
+			seq->next_burst_len += payload_length;
+
+			if (send_r2t) {
+				seq->next_burst_len = 0;
+				seq->status = DATAOUT_SEQUENCE_COMPLETE;
+			}
+		}
+	}
+
+	if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
+		cmd->data_sn = 0;
+
+	cmd->write_data_done += payload_length;
+
+	return (cmd->write_data_done == cmd->data_length) ?
+		DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
+		DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+}
+
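+/*
+ *	A DataOUT PDU failed CRC.  For DataPDUInOrder=No, mark the PDU
+ *	holder as failed before recovering the DataOUT sequence.
+ */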
+static int iscsit_dataout_post_crc_failed(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (conn->sess->sess_ops->DataPDUInOrder)
+		goto recover;
+	/*
+	 * The rest of this function is only called when DataPDUInOrder=No.
+	 */
+	pdu = cmd->pdu_ptr;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+		pdu->status = ISCSI_PDU_CRC_FAILED;
+		break;
+	case ISCSI_PDU_CRC_FAILED:
+		break;
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_CRC_FAILED;
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+recover:
+	return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
+}
+
+/*
+ *	Called from iscsit_handle_data_out() before DataOUT Payload is received
+ *	and CRC computed.
+ */
+int iscsit_check_pre_dataout(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	int ret;
+	struct iscsi_conn *conn = cmd->conn;
+
+	ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
+	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+	    (ret == DATAOUT_CANNOT_RECOVER))
+		return ret;
+
+	ret = iscsit_dataout_check_datasn(cmd, buf);
+	if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+	    (ret == DATAOUT_CANNOT_RECOVER))
+		return ret;
+
+	if (cmd->unsolicited_data) {
+		ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
+		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+		    (ret == DATAOUT_CANNOT_RECOVER))
+			return ret;
+	} else {
+		ret = iscsit_dataout_check_sequence(cmd, buf);
+		if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+		    (ret == DATAOUT_CANNOT_RECOVER))
+			return ret;
+	}
+
+	return (conn->sess->sess_ops->DataPDUInOrder) ?
+		iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
+		iscsit_dataout_pre_datapduinorder_no(cmd, buf);
+}
+
+/*
+ *	Called from iscsit_handle_data_out() after DataOUT Payload is received
+ *	and CRC computed.
+ */
+int iscsit_check_post_dataout(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u8 data_crc_failed)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->dataout_timeout_retries = 0;
+
+	if (!data_crc_failed)
+		return iscsit_dataout_post_crc_passed(cmd, buf);
+	else {
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Unable to recover from DataOUT CRC"
+				" failure while ERL=0, closing session.\n");
+			iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+					1, 0, buf, cmd);
+			return DATAOUT_CANNOT_RECOVER;
+		}
+
+		iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+				0, 0, buf, cmd);
+		return iscsit_dataout_post_crc_failed(cmd, buf);
+	}
+}
+
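+/*
+ *	Timer handler invoked when Time2Retain expires: update the session
+ *	error stats and drop the session reference.
+ */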
+static void iscsit_handle_time2retain_timeout(unsigned long data)
+{
+	struct iscsi_session *sess = (struct iscsi_session *) data;
+	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&se_tpg->session_lock);
+		return;
+	}
+	if (atomic_read(&sess->session_reinstatement)) {
+		pr_err("Exiting Time2Retain handler because"
+				" session_reinstatement=1\n");
+		spin_unlock_bh(&se_tpg->session_lock);
+		return;
+	}
+	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
+
+	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
+			" iSCSI session.\n", sess->sid);
+	{
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	if (tiqn) {
+		spin_lock(&tiqn->sess_err_stats.lock);
+		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+			(void *)sess->sess_ops->InitiatorName);
+		tiqn->sess_err_stats.last_sess_failure_type =
+				ISCSI_SESS_ERR_CXN_TIMEOUT;
+		tiqn->sess_err_stats.cxn_timeout_errors++;
+		sess->conn_timeout_errors++;
+		spin_unlock(&tiqn->sess_err_stats.lock);
+	}
+	}
+
+	spin_unlock_bh(&se_tpg->session_lock);
+	target_put_session(sess->se_sess);
+}
+
+void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+{
+	int tpg_active;
+	/*
+	 * Only start the Time2Retain timer when the associated TPG is still
+	 * in an ACTIVE (e.g. not disabled or shutdown) state.
+	 */
+	spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+	tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
+	spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+
+	if (!tpg_active)
+		return;
+
+	if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	pr_debug("Starting Time2Retain timer for %u seconds on"
+		" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
+
+	init_timer(&sess->time2retain_timer);
+	sess->time2retain_timer.expires =
+		(get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
+	sess->time2retain_timer.data = (unsigned long)sess;
+	sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
+	sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
+	sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&sess->time2retain_timer);
+}
+
+/*
+ *	Called with spin_lock_bh(&struct se_portal_group->session_lock) held
+ */
+int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+{
+	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
+		return -1;
+
+	if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
+		return 0;
+
+	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock(&se_tpg->session_lock);
+
+	del_timer_sync(&sess->time2retain_timer);
+
+	spin_lock(&se_tpg->session_lock);
+	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+	pr_debug("Stopped Time2Retain Timer for SID: %u\n",
+			sess->sid);
+	return 0;
+}
+
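+/*
+ *	Force thread set reinstatement for the connection unless it is
+ *	already exiting or has failed, then wait for the reinstatement to
+ *	complete.
+ */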
+void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		goto sleep;
+	}
+
+	if (atomic_read(&conn->transport_failed)) {
+		spin_unlock_bh(&conn->state_lock);
+		goto sleep;
+	}
+	spin_unlock_bh(&conn->state_lock);
+
+	iscsi_thread_set_force_reinstatement(conn);
+
+sleep:
+	wait_for_completion(&conn->conn_wait_rcfr_comp);
+	complete(&conn->conn_post_wait_comp);
+}
+
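+/*
+ *	Flag the connection for reinstatement and force the thread set to
+ *	reinstate it, optionally sleeping until reinstatement completes.
+ */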
+void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+{
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (atomic_read(&conn->transport_failed)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (atomic_read(&conn->connection_reinstatement)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	atomic_set(&conn->connection_reinstatement, 1);
+	if (!sleep) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
+	spin_unlock_bh(&conn->state_lock);
+
+	wait_for_completion(&conn->conn_wait_comp);
+	complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+{
+	pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
+			" %u\n", sess->sid);
+
+	atomic_set(&sess->session_fall_back_to_erl0, 1);
+}
+
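+/*
+ *	If ErrorRecoveryLevel=2 and the session is not being reinstated or
+ *	falling back to ERL=0, attempt connection recovery; otherwise close
+ *	the failed connection.
+ */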
+static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
+	    !atomic_read(&sess->session_reinstatement) &&
+	    !atomic_read(&sess->session_fall_back_to_erl0))
+		iscsit_connection_recovery_transport_reset(conn);
+	else {
+		pr_debug("Performing cleanup for failed iSCSI"
+			" Connection ID: %hu from %s\n", conn->cid,
+			sess->sess_ops->InitiatorName);
+		iscsit_close_connection(conn);
+	}
+}
+
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->state_lock);
+	if (atomic_read(&conn->connection_exit)) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+	atomic_set(&conn->connection_exit, 1);
+
+	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		spin_unlock_bh(&conn->state_lock);
+		iscsit_close_connection(conn);
+		return;
+	}
+
+	if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
+		spin_unlock_bh(&conn->state_lock);
+		return;
+	}
+
+	pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+	conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+	spin_unlock_bh(&conn->state_lock);
+
+	iscsit_handle_connection_cleanup(conn);
+}
+
+/*
+ *	This is the simple function that makes the magic of
+ *	sync and steering happen in the following paradoxical order:
+ *
+ *	0) Receive conn->of_marker (bytes left until next OFMarker)
+ *	   bytes into an offload buffer.  When we pass the exact number
+ *	   of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
+ *	   rx_data() will automatically receive the identical u32 marker
+ *	   values and store it in conn->of_marker_offset;
+ *	1) Now conn->of_marker_offset will contain the offset to the start
+ *	   of the next iSCSI PDU.  Dump these remaining bytes into another
+ *	   offload buffer.
+ *	2) We are done!
+ *	   Next byte in the TCP stream will contain the next iSCSI PDU!
+ *	   Cool Huh?!
+ */
+int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
+{
+	/*
+	 * Make sure the number of remaining bytes to the next marker is sane.
+	 */
+	if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
+		pr_err("Remaining bytes to OFMarker: %u exceeds"
+			" OFMarkInt bytes: %u.\n", conn->of_marker,
+				conn->conn_ops->OFMarkInt * 4);
+		return -1;
+	}
+
+	pr_debug("Advancing %u bytes in TCP stream to get to the"
+			" next OFMarker.\n", conn->of_marker);
+
+	if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
+		return -1;
+
+	/*
+	 * Make sure the offset marker we retrieved is a valid value.
+	 */
+	if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
+	    conn->conn_ops->MaxRecvDataSegmentLength)) {
+		pr_err("OfMarker offset value: %u exceeds limit.\n",
+			conn->of_marker_offset);
+		return -1;
+	}
+
+	pr_debug("Discarding %u bytes of TCP stream to get to the"
+			" next iSCSI Opcode.\n", conn->of_marker_offset);
+
+	if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
+		return -1;
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 0000000..21acc9a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
+#ifndef ISCSI_TARGET_ERL0_H
+#define ISCSI_TARGET_ERL0_H
+
+extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+
+#endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 0000000..fd669b5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
+/*******************************************************************************
+ * This file contains error recovery level one used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+#define OFFLOAD_BUF_SIZE	32768
+
+/*
+ *	Used to dump excess datain payload for certain error recovery
+ *	situations.  Receives at most OFFLOAD_BUF_SIZE bytes of datain per rx_data() call.
+ *
+ *	dump_padding_digest denotes if padding and data digests need
+ *	to be dumped.
+ */
+int iscsit_dump_data_payload(
+	struct iscsi_conn *conn,
+	u32 buf_len,
+	int dump_padding_digest)
+{
+	char *buf, pad_bytes[4];
+	int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
+	u32 length, padding, offset = 0, size;
+	struct kvec iov;
+
+	length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+
+	buf = kzalloc(length, GFP_ATOMIC);
+	if (!buf) {
+		pr_err("Unable to allocate %u bytes for offload"
+				" buffer.\n", length);
+		return -1;
+	}
+	memset(&iov, 0, sizeof(struct kvec));
+
+	while (offset < buf_len) {
+		size = ((offset + length) > buf_len) ?
+			(buf_len - offset) : length;
+
+		iov.iov_len = size;
+		iov.iov_base = buf;
+
+		rx_got = rx_data(conn, &iov, 1, size);
+		if (rx_got != size) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+
+		offset += size;
+	}
+
+	if (!dump_padding_digest)
+		goto out;
+
+	padding = ((-buf_len) & 3);
+	if (padding != 0) {
+		iov.iov_len = padding;
+		iov.iov_base = pad_bytes;
+
+		rx_got = rx_data(conn, &iov, 1, padding);
+		if (rx_got != padding) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+	}
+
+	if (conn->conn_ops->DataDigest) {
+		u32 data_crc;
+
+		iov.iov_len = ISCSI_CRC_LEN;
+		iov.iov_base = &data_crc;
+
+		rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+		if (rx_got != ISCSI_CRC_LEN) {
+			ret = DATAOUT_CANNOT_RECOVER;
+			goto out;
+		}
+	}
+
+out:
+	kfree(buf);
+	return ret;
+}
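
The padding handled above follows the iSCSI rule that a data segment is padded out to a 4-byte word boundary, and ((-buf_len) & 3) is a branch-free way of computing how many pad bytes that requires. A minimal stand-alone sketch of that arithmetic, where iscsi_pad_bytes() is a hypothetical helper written only for illustration and is not part of this patch:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the expression used in the dump path above:
 * pad bytes needed to round an iSCSI data segment up to a 4-byte boundary. */
static uint32_t iscsi_pad_bytes(uint32_t len)
{
	return (-len) & 3;
}

int main(void)
{
	uint32_t len;

	/* len=0 -> 0, len=1 -> 3, len=2 -> 2, len=3 -> 1, len=4 -> 0, ... */
	for (len = 0; len < 8; len++)
		printf("len=%u -> pad=%u\n", len, iscsi_pad_bytes(len));
	return 0;
}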
+
+/*
+ *	Used for retransmitting R2Ts from a R2T SNACK request.
+ */
+static int iscsit_send_recovery_r2t_for_snack(
+	struct iscsi_cmd *cmd,
+	struct iscsi_r2t *r2t)
+{
+	/*
+	 * If the struct iscsi_r2t has not been sent yet, we can safely
+	 * ignore retransmission of the R2TSN in question.
+	 */
+	spin_lock_bh(&cmd->r2t_lock);
+	if (!r2t->sent_r2t) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return 0;
+	}
+	r2t->sent_r2t = 0;
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+	return 0;
+}
+
+static int iscsit_handle_r2t_snack(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u32 begrun,
+	u32 runlength)
+{
+	u32 last_r2tsn;
+	struct iscsi_r2t *r2t;
+
+	/*
+	 * Make sure the initiator is not requesting retransmission
+	 * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
+	 */
+	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+	    (begrun <= cmd->acked_data_sn)) {
+		pr_err("ITT: 0x%08x, R2T SNACK requesting"
+			" retransmission of R2TSN: 0x%08x to 0x%08x but already"
+			" acked to  R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+			" protocol error.\n", cmd->init_task_tag, begrun,
+			(begrun + runlength), cmd->acked_data_sn);
+
+			return iscsit_add_reject_from_cmd(
+					ISCSI_REASON_PROTOCOL_ERROR,
+					1, 0, buf, cmd);
+	}
+
+	if (runlength) {
+		if ((begrun + runlength) > cmd->r2t_sn) {
+			pr_err("Command ITT: 0x%08x received R2T SNACK"
+			" with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+			" current R2TSN: 0x%08x, protocol error.\n",
+			cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+			return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+		}
+		last_r2tsn = (begrun + runlength);
+	} else
+		last_r2tsn = cmd->r2t_sn;
+
+	while (begrun < last_r2tsn) {
+		r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+		if (!r2t)
+			return -1;
+		if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+			return -1;
+
+		begrun++;
+	}
+
+	return 0;
+}
+
+/*
+ *	Generates Offsets and NextBurstLength based on Begrun and Runlength
+ *	carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ *	For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ *	FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain_req *dr)
+{
+	u32 data_sn = 0, data_sn_count = 0;
+	u32 pdu_start = 0, seq_no = 0;
+	u32 begrun = dr->begrun;
+	struct iscsi_conn *conn = cmd->conn;
+
+	while (begrun > data_sn++) {
+		data_sn_count++;
+		if ((dr->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			dr->read_data_done +=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			dr->next_burst_len +=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+		} else {
+			dr->read_data_done +=
+				(conn->sess->sess_ops->MaxBurstLength -
+				 dr->next_burst_len);
+			dr->next_burst_len = 0;
+			pdu_start += data_sn_count;
+			data_sn_count = 0;
+			seq_no++;
+		}
+	}
+
+	if (!conn->sess->sess_ops->DataPDUInOrder) {
+		cmd->seq_no = seq_no;
+		cmd->pdu_start = pdu_start;
+		cmd->pdu_send_order = data_sn_count;
+	}
+
+	return 0;
+}
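
To make the BegRun replay above concrete, the stand-alone sketch below advances read_data_done and next_burst_len per DataSN with the same accounting, assuming every DataIN PDU carries a full MaxRecvDataSegmentLength. The parameter values (8 KB MRDSL, 64 KB MaxBurstLength, BegRun of 10) are arbitrary examples chosen for the sketch, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t mrdsl = 8192;		/* MaxRecvDataSegmentLength */
	const uint32_t max_burst = 65536;	/* MaxBurstLength */
	const uint32_t begrun = 10;		/* BegRun from the Data SNACK */
	uint32_t read_data_done = 0, next_burst_len = 0;
	uint32_t data_sn;

	for (data_sn = 0; data_sn < begrun; data_sn++) {
		if (next_burst_len + mrdsl < max_burst) {
			read_data_done += mrdsl;
			next_burst_len += mrdsl;
		} else {
			/* Final PDU of a burst: only the burst remainder counts. */
			read_data_done += max_burst - next_burst_len;
			next_burst_len = 0;
		}
	}
	/* Prints read_data_done=81920 next_burst_len=16384 for these values. */
	printf("read_data_done=%u next_burst_len=%u\n",
	       read_data_done, next_burst_len);
	return 0;
}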
+
+/*
+ *	Generates Offsets and NextBurstLength based on Begrun and Runlength
+ *	carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ *	For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ *	FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+	struct iscsi_cmd *cmd,
+	struct iscsi_datain_req *dr)
+{
+	int found_seq = 0, i;
+	u32 data_sn, read_data_done = 0, seq_send_order = 0;
+	u32 begrun = dr->begrun;
+	u32 runlength = dr->runlength;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return -1;
+	}
+
+	/*
+	 * Calculate read_data_done for all sequences containing a
+	 * first_datasn and last_datasn less than the BegRun.
+	 *
+	 * Locate the struct iscsi_seq the BegRun lies within and calculate
+	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+	 *
+	 * Also use struct iscsi_seq->seq_send_order to determine where to start.
+	 */
+	for (i = 0; i < cmd->seq_count; i++) {
+		seq = &cmd->seq_list[i];
+
+		if (!seq->seq_send_order)
+			first_seq = seq;
+
+		/*
+		 * No data has been transferred for this DataIN sequence, so the
+		 * seq->first_datasn and seq->last_datasn have not been set.
+		 */
+		if (!seq->sent) {
+#if 0
+			pr_err("Ignoring non-sent sequence 0x%08x ->"
+				" 0x%08x\n\n", seq->first_datasn,
+				seq->last_datasn);
+#endif
+			continue;
+		}
+
+		/*
+		 * This DataIN sequence precedes the received BegRun, so add the
+		 * total xfer_len of the sequence to read_data_done and reset
+		 * seq->pdu_send_order.
+		 */
+		if ((seq->first_datasn < begrun) &&
+				(seq->last_datasn < begrun)) {
+#if 0
+			pr_err("Pre BegRun sequence 0x%08x ->"
+				" 0x%08x\n", seq->first_datasn,
+				seq->last_datasn);
+#endif
+			read_data_done += cmd->seq_list[i].xfer_len;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			continue;
+		}
+
+		/*
+		 * The BegRun lies within this DataIN sequence.
+		 */
+		if ((seq->first_datasn <= begrun) &&
+				(seq->last_datasn >= begrun)) {
+#if 0
+			pr_err("Found sequence begrun: 0x%08x in"
+				" 0x%08x -> 0x%08x\n", begrun,
+				seq->first_datasn, seq->last_datasn);
+#endif
+			seq_send_order = seq->seq_send_order;
+			data_sn = seq->first_datasn;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			found_seq = 1;
+
+			/*
+			 * For DataPDUInOrder=Yes, while the first DataSN of
+			 * the sequence is less than the received BegRun, add
+			 * the MaxRecvDataSegmentLength to read_data_done and
+			 * to the sequence's next_burst_len;
+			 *
+			 * For DataPDUInOrder=No, while the first DataSN of the
+			 * sequence is less than the received BegRun, find the
+			 * struct iscsi_pdu of the DataSN in question and add the
+			 * MaxRecvDataSegmentLength to read_data_done and to the
+			 * sequence's next_burst_len;
+			 */
+			if (conn->sess->sess_ops->DataPDUInOrder) {
+				while (data_sn < begrun) {
+					seq->pdu_send_order++;
+					read_data_done +=
+						conn->conn_ops->MaxRecvDataSegmentLength;
+					seq->next_burst_len +=
+						conn->conn_ops->MaxRecvDataSegmentLength;
+					data_sn++;
+				}
+			} else {
+				int j;
+				struct iscsi_pdu *pdu;
+
+				while (data_sn < begrun) {
+					seq->pdu_send_order++;
+
+					for (j = 0; j < seq->pdu_count; j++) {
+						pdu = &cmd->pdu_list[
+							seq->pdu_start + j];
+						if (pdu->data_sn == data_sn) {
+							read_data_done +=
+								pdu->length;
+							seq->next_burst_len +=
+								pdu->length;
+						}
+					}
+					data_sn++;
+				}
+			}
+			continue;
+		}
+
+		/*
+		 * This DataIN sequence is larger than the received BegRun,
+		 * reset seq->pdu_send_order and continue.
+		 */
+		if ((seq->first_datasn > begrun) ||
+				(seq->last_datasn > begrun)) {
+#if 0
+			pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+					seq->first_datasn, seq->last_datasn);
+#endif
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			continue;
+		}
+	}
+
+	if (!found_seq) {
+		if (!begrun) {
+			if (!first_seq) {
+				pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+					" but first_seq is NULL\n",
+					cmd->init_task_tag, begrun);
+				return -1;
+			}
+			seq_send_order = first_seq->seq_send_order;
+			seq->next_burst_len = seq->pdu_send_order = 0;
+			goto done;
+		}
+
+		pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+			" BegRun: 0x%08x, RunLength: 0x%08x while"
+			" DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+				cmd->init_task_tag, begrun, runlength,
+			(conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
+		return -1;
+	}
+
+done:
+	dr->read_data_done = read_data_done;
+	dr->seq_send_order = seq_send_order;
+
+	return 0;
+}
+
+static int iscsit_handle_recovery_datain(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
+		pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
+				cmd->init_task_tag);
+		return 0;
+	}
+
+	/*
+	 * Make sure the initiator is not requesting retransmission
+	 * of DataSNs already acknowledged by a Data ACK SNACK.
+	 */
+	if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+	    (begrun <= cmd->acked_data_sn)) {
+		pr_err("ITT: 0x%08x, Data SNACK requesting"
+			" retransmission of DataSN: 0x%08x to 0x%08x but"
+			" already acked to DataSN: 0x%08x by Data ACK SNACK,"
+			" protocol error.\n", cmd->init_task_tag, begrun,
+			(begrun + runlength), cmd->acked_data_sn);
+
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 0, buf, cmd);
+	}
+
+	/*
+	 * Make sure BegRun and RunLength in the Data SNACK are sane.
+	 * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
+	 */
+	if ((begrun + runlength) > (cmd->data_sn - 1)) {
+		pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
+			": 0x%08x greater than maximum DataSN: 0x%08x.\n",
+				begrun, runlength, (cmd->data_sn - 1));
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 0, buf, cmd);
+	}
+
+	dr = iscsit_allocate_datain_req();
+	if (!dr)
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 0, buf, cmd);
+
+	dr->data_sn = dr->begrun = begrun;
+	dr->runlength = runlength;
+	dr->generate_recovery_values = 1;
+	dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
+
+	iscsit_attach_datain_req(cmd, dr);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+	return 0;
+}
+
+int iscsit_handle_recovery_datain_or_r2t(
+	struct iscsi_conn *conn,
+	unsigned char *buf,
+	u32 init_task_tag,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
+	if (!cmd)
+		return 0;
+
+	/*
+	 * FIXME: This will not work for bidi commands.
+	 */
+	switch (cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
+	case DMA_FROM_DEVICE:
+		return iscsit_handle_recovery_datain(cmd, buf, begrun,
+				runlength);
+	default:
+		pr_err("Unknown cmd->data_direction: 0x%02x\n",
+				cmd->data_direction);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
+int iscsit_handle_status_snack(
+	struct iscsi_conn *conn,
+	u32 init_task_tag,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd = NULL;
+	u32 last_statsn;
+	int found_cmd;
+
+	if (conn->exp_statsn > begrun) {
+		pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
+			" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
+			" %hu.\n", begrun, runlength, conn->exp_statsn,
+			conn->cid);
+		return 0;
+	}
+
+	last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
+
+	while (begrun < last_statsn) {
+		found_cmd = 0;
+
+		spin_lock_bh(&conn->cmd_lock);
+		list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+			if (cmd->stat_sn == begrun) {
+				found_cmd = 1;
+				break;
+			}
+		}
+		spin_unlock_bh(&conn->cmd_lock);
+
+		if (!found_cmd) {
+			pr_err("Unable to find StatSN: 0x%08x for"
+				" a Status SNACK, assuming this was a"
+				" proactive SNACK for an untransmitted"
+				" StatSN, ignoring.\n", begrun);
+			begrun++;
+			continue;
+		}
+
+		spin_lock_bh(&cmd->istate_lock);
+		if (cmd->i_state == ISTATE_SEND_DATAIN) {
+			spin_unlock_bh(&cmd->istate_lock);
+			pr_err("Ignoring Status SNACK for BegRun:"
+				" 0x%08x, RunLength: 0x%08x, assuming this was"
+				" a proactive SNACK for an untransmitted"
+				" StatSN\n", begrun, runlength);
+			begrun++;
+			continue;
+		}
+		spin_unlock_bh(&cmd->istate_lock);
+
+		cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		begrun++;
+	}
+
+	return 0;
+}
+
+int iscsit_handle_data_ack(
+	struct iscsi_conn *conn,
+	u32 targ_xfer_tag,
+	u32 begrun,
+	u32 runlength)
+{
+	struct iscsi_cmd *cmd = NULL;
+
+	cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+	if (!cmd) {
+		pr_err("Data ACK SNACK for TTT: 0x%08x is"
+			" invalid.\n", targ_xfer_tag);
+		return -1;
+	}
+
+	if (begrun <= cmd->acked_data_sn) {
+		pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
+			" less than the already acked DataSN: 0x%08x.\n",
+			cmd->init_task_tag, begrun, cmd->acked_data_sn);
+		return -1;
+	}
+
+	/*
+	 * For Data ACK SNACK, BegRun is the next expected DataSN.
+	 * (see iSCSI v19: 10.16.6)
+	 */
+	cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+	cmd->acked_data_sn = (begrun - 1);
+
+	pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+		" updated acked DataSN to 0x%08x.\n",
+			cmd->init_task_tag, cmd->acked_data_sn);
+
+	return 0;
+}
+
+static int iscsit_send_recovery_r2t(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 xfer_len)
+{
+	int ret;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+	struct iscsi_cmd *cmd,
+	struct iscsi_pdu *pdu)
+{
+	int i, send_recovery_r2t = 0, recovery = 0;
+	u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *first_pdu = NULL;
+
+	/*
+	 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU count
+	 * of the DataOUT sequence.
+	 */
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		for (i = 0; i < cmd->pdu_count; i++) {
+			if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+				if (!first_pdu)
+					first_pdu = &cmd->pdu_list[i];
+				 xfer_len += cmd->pdu_list[i].length;
+				 pdu_count++;
+			} else if (pdu_count)
+				break;
+		}
+	} else {
+		struct iscsi_seq *seq = cmd->seq_ptr;
+
+		first_pdu = &cmd->pdu_list[seq->pdu_start];
+		pdu_count = seq->pdu_count;
+	}
+
+	if (!first_pdu || !pdu_count)
+		return DATAOUT_CANNOT_RECOVER;
+
+	/*
+	 * Loop through the ending DataOUT sequence, checking each struct iscsi_pdu.
+	 * The following logic batches runs of PDUs that were not received.
+	 */
+	for (i = 0; i < pdu_count; i++) {
+		if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
+			if (!send_recovery_r2t)
+				continue;
+
+			if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+				return DATAOUT_CANNOT_RECOVER;
+
+			send_recovery_r2t = length = offset = 0;
+			continue;
+		}
+		/*
+		 * Set recovery = 1 for any missing, CRC failed, or timed
+		 * out PDUs to let the DataOUT logic know that this sequence
+		 * has not been completed yet.
+		 *
+		 * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
+		 * We assume if the PDU either failed CRC or timed out
+		 * that a Recovery R2T has already been sent.
+		 */
+		recovery = 1;
+
+		if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
+			continue;
+
+		if (!offset)
+			offset = first_pdu[i].offset;
+		length += first_pdu[i].length;
+
+		send_recovery_r2t = 1;
+	}
+
+	if (send_recovery_r2t)
+		if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+			return DATAOUT_CANNOT_RECOVER;
+
+	return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
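
The loop above merges each contiguous run of missing PDUs into one recovery R2T rather than requesting them one at a time. A simplified stand-alone illustration of that merge; struct pdu and the sample offsets below are invented for the example and are not the driver's data structures:

#include <stdio.h>
#include <stdint.h>

struct pdu { uint32_t offset, length; int received; };

int main(void)
{
	struct pdu pdus[] = {
		{ 0,     4096, 1 }, { 4096,  4096, 0 }, { 8192, 4096, 0 },
		{ 12288, 4096, 1 }, { 16384, 4096, 0 },
	};
	uint32_t offset = 0, length = 0;
	int i, pending = 0;

	for (i = 0; i < 5; i++) {
		if (pdus[i].received) {
			if (pending) {
				/* Close out the current run of missing PDUs. */
				printf("recovery R2T: offset=%u length=%u\n",
				       offset, length);
				pending = length = offset = 0;
			}
			continue;
		}
		if (!offset)
			offset = pdus[i].offset;
		length += pdus[i].length;
		pending = 1;
	}
	if (pending)
		printf("recovery R2T: offset=%u length=%u\n", offset, length);
	return 0;
}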
+
+static int iscsit_recalculate_dataout_values(
+	struct iscsi_cmd *cmd,
+	u32 pdu_offset,
+	u32 pdu_length,
+	u32 *r2t_offset,
+	u32 *r2t_length)
+{
+	int i;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		cmd->data_sn = 0;
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			*r2t_offset = cmd->write_data_done;
+			*r2t_length = (cmd->seq_end_offset -
+					cmd->write_data_done);
+			return 0;
+		}
+
+		*r2t_offset = cmd->seq_start_offset;
+		*r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
+
+		for (i = 0; i < cmd->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			if ((pdu->offset >= cmd->seq_start_offset) &&
+			   ((pdu->offset + pdu->length) <=
+			     cmd->seq_end_offset)) {
+				if (!cmd->unsolicited_data)
+					cmd->next_burst_len -= pdu->length;
+				else
+					cmd->first_burst_len -= pdu->length;
+
+				cmd->write_data_done -= pdu->length;
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	} else {
+		struct iscsi_seq *seq = NULL;
+
+		seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
+		if (!seq)
+			return -1;
+
+		*r2t_offset = seq->orig_offset;
+		*r2t_length = seq->xfer_len;
+
+		cmd->write_data_done -= (seq->offset - seq->orig_offset);
+		if (cmd->immediate_data)
+			cmd->first_burst_len = cmd->write_data_done;
+
+		seq->data_sn = 0;
+		seq->offset = seq->orig_offset;
+		seq->next_burst_len = 0;
+		seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+		if (conn->sess->sess_ops->DataPDUInOrder)
+			return 0;
+
+		for (i = 0; i < seq->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			pdu->status = ISCSI_PDU_NOT_RECEIVED;
+		}
+	}
+
+	return 0;
+}
+
+int iscsit_recover_dataout_sequence(
+	struct iscsi_cmd *cmd,
+	u32 pdu_offset,
+	u32 pdu_length)
+{
+	u32 r2t_length = 0, r2t_offset = 0;
+
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+	spin_unlock_bh(&cmd->istate_lock);
+
+	if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+			&r2t_offset, &r2t_length) < 0)
+		return DATAOUT_CANNOT_RECOVER;
+
+	iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
+
+	return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
+
+	ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
+	if (!ooo_cmdsn) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_ooo_cmdsn.\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
+
+	return ooo_cmdsn;
+}
+
+/*
+ *	Called with sess->cmdsn_mutex held.
+ */
+static int iscsit_attach_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+	struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+	/*
+	 * We attach the struct iscsi_ooo_cmdsn entry to the out of order
+	 * list in increasing CmdSN order.
+	 * This allows iscsi_execute_ooo_cmdsns() to detect any
+	 * additional CmdSN holes while performing delayed execution.
+	 */
+	if (list_empty(&sess->sess_ooo_cmdsn_list))
+		list_add_tail(&ooo_cmdsn->ooo_list,
+				&sess->sess_ooo_cmdsn_list);
+	else {
+		ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+				typeof(*ooo_tail), ooo_list);
+		/*
+		 * CmdSN is greater than the tail of the list.
+		 */
+		if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
+			list_add_tail(&ooo_cmdsn->ooo_list,
+					&sess->sess_ooo_cmdsn_list);
+		else {
+			/*
+			 * CmdSN is either lower than the head, or somewhere
+			 * in the middle.
+			 */
+			list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+						ooo_list) {
+				if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
+					continue;
+
+				/* Insert before this entry */
+				list_add(&ooo_cmdsn->ooo_list,
+					ooo_tmp->ooo_list.prev);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
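
The sorted insert above depends on iscsi_sna_lt(), i.e. 32-bit Serial Number Arithmetic in the spirit of RFC 1982, so CmdSN ordering stays correct across wraparound of the 32-bit counter. One common formulation of such a comparison is sketched below; sna32_lt() is an illustrative stand-in and is not the kernel's definition:

#include <stdio.h>
#include <stdint.h>

/* Illustrative serial-number "less than" for 32-bit counters. */
static int sna32_lt(uint32_t a, uint32_t b)
{
	return a != b && (int32_t)(a - b) < 0;
}

int main(void)
{
	printf("%d\n", sna32_lt(5, 10));		/* 1 */
	printf("%d\n", sna32_lt(0xfffffffeU, 3));	/* 1: correct across wrap */
	printf("%d\n", sna32_lt(10, 5));		/* 0 */
	return 0;
}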
+
+/*
+ *	Removes a struct iscsi_ooo_cmdsn from a session's list,
+ *	called with struct iscsi_session->cmdsn_mutex held.
+ */
+void iscsit_remove_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+	list_del(&ooo_cmdsn->ooo_list);
+	kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+}
+
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn;
+	struct iscsi_session *sess = conn->sess;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
+		if (ooo_cmdsn->cid != conn->cid)
+			continue;
+
+		ooo_cmdsn->cmd = NULL;
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+}
+
+/*
+ *	Called with sess->cmdsn_mutex held.
+ */
+int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+{
+	int ooo_count = 0;
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+				&sess->sess_ooo_cmdsn_list, ooo_list) {
+		if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
+			continue;
+
+		if (!ooo_cmdsn->cmd) {
+			sess->exp_cmd_sn++;
+			iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+			continue;
+		}
+
+		cmd = ooo_cmdsn->cmd;
+		cmd->i_state = cmd->deferred_i_state;
+		ooo_count++;
+		sess->exp_cmd_sn++;
+		pr_debug("Executing out of order CmdSN: 0x%08x,"
+			" incremented ExpCmdSN to 0x%08x.\n",
+			cmd->cmd_sn, sess->exp_cmd_sn);
+
+		iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+
+		if (iscsit_execute_cmd(cmd, 1) < 0)
+			return -1;
+
+		continue;
+	}
+
+	return ooo_count;
+}
+
+/*
+ *	Called either:
+ *
+ *	1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
+ *	or iscsi_check_received_cmdsn().
+ *	2. With no locks held directly from iscsi_handle_XXX_pdu() functions
+ *	for immediate commands.
+ */
+int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	int lr = 0;
+
+	spin_lock_bh(&cmd->istate_lock);
+	if (ooo)
+		cmd->cmd_flags &= ~ICF_OOO_CMDSN;
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		/*
+		 * Go ahead and send the CHECK_CONDITION status for
+		 * any SCSI CDB exceptions that may have occurred, also
+		 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+		 */
+		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+			if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+				cmd->i_state = ISTATE_SEND_STATUS;
+				spin_unlock_bh(&cmd->istate_lock);
+				iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+						cmd->i_state);
+				return 0;
+			}
+			spin_unlock_bh(&cmd->istate_lock);
+			/*
+			 * Determine if delayed TASK_ABORTED status for WRITEs
+			 * should be sent now if no unsolicited data out
+			 * payloads are expected, or if the delayed status
+			 * should be sent after unsolicited data out with
+			 * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
+			 */
+			if (transport_check_aborted_status(se_cmd,
+					(cmd->unsolicited_data == 0)) != 0)
+				return 0;
+			/*
+			 * Otherwise send CHECK_CONDITION and sense for
+			 * exception
+			 */
+			return transport_send_check_condition_and_sense(se_cmd,
+					se_cmd->scsi_sense_reason, 0);
+		}
+		/*
+		 * Special case for delayed CmdSN with Immediate
+		 * Data and/or Unsolicited Data Out attached.
+		 */
+		if (cmd->immediate_data) {
+			if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+				spin_unlock_bh(&cmd->istate_lock);
+				return transport_generic_handle_data(
+						&cmd->se_cmd);
+			}
+			spin_unlock_bh(&cmd->istate_lock);
+
+			if (!(cmd->cmd_flags &
+					ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+				/*
+				 * Send the delayed TASK_ABORTED status for
+				 * WRITEs if no more unsolicited data is
+				 * expected.
+				 */
+				if (transport_check_aborted_status(se_cmd, 1)
+						!= 0)
+					return 0;
+
+				iscsit_set_dataout_sequence_values(cmd);
+				iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+			}
+			return 0;
+		}
+		/*
+		 * The default handler.
+		 */
+		spin_unlock_bh(&cmd->istate_lock);
+
+		if ((cmd->data_direction == DMA_TO_DEVICE) &&
+		    !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+			/*
+			 * Send the delayed TASK_ABORTED status for WRITEs if
+			 * no more unsolicited data is expected.
+			 */
+			if (transport_check_aborted_status(se_cmd, 1) != 0)
+				return 0;
+
+			iscsit_set_dataout_sequence_values(cmd);
+			spin_lock_bh(&cmd->dataout_timeout_lock);
+			iscsit_start_dataout_timer(cmd, cmd->conn);
+			spin_unlock_bh(&cmd->dataout_timeout_lock);
+		}
+		return transport_handle_cdb_direct(&cmd->se_cmd);
+
+	case ISCSI_OP_NOOP_OUT:
+	case ISCSI_OP_TEXT:
+		spin_unlock_bh(&cmd->istate_lock);
+		iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+			spin_unlock_bh(&cmd->istate_lock);
+			iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+					cmd->i_state);
+			return 0;
+		}
+		spin_unlock_bh(&cmd->istate_lock);
+
+		return transport_generic_handle_tmr(&cmd->se_cmd);
+	case ISCSI_OP_LOGOUT:
+		spin_unlock_bh(&cmd->istate_lock);
+		switch (cmd->logout_reason) {
+		case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+			lr = iscsit_logout_closesession(cmd, cmd->conn);
+			break;
+		case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+			lr = iscsit_logout_closeconnection(cmd, cmd->conn);
+			break;
+		case ISCSI_LOGOUT_REASON_RECOVERY:
+			lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
+			break;
+		default:
+			pr_err("Unknown iSCSI Logout Request Code:"
+				" 0x%02x\n", cmd->logout_reason);
+			return -1;
+		}
+
+		return lr;
+	default:
+		spin_unlock_bh(&cmd->istate_lock);
+		pr_err("Cannot perform out of order execution for"
+		" unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
+		return -1;
+	}
+
+	return 0;
+}
+
+void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+{
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+			&sess->sess_ooo_cmdsn_list, ooo_list) {
+
+		list_del(&ooo_cmdsn->ooo_list);
+		kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+}
+
+int iscsit_handle_ooo_cmdsn(
+	struct iscsi_session *sess,
+	struct iscsi_cmd *cmd,
+	u32 cmdsn)
+{
+	int batch = 0;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
+
+	cmd->deferred_i_state		= cmd->i_state;
+	cmd->i_state			= ISTATE_DEFERRED_CMD;
+	cmd->cmd_flags			|= ICF_OOO_CMDSN;
+
+	if (list_empty(&sess->sess_ooo_cmdsn_list))
+		batch = 1;
+	else {
+		ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+				typeof(*ooo_tail), ooo_list);
+		if (ooo_tail->cmdsn != (cmdsn - 1))
+			batch = 1;
+	}
+
+	ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
+	if (!ooo_cmdsn)
+		return CMDSN_ERROR_CANNOT_RECOVER;
+
+	ooo_cmdsn->cmd			= cmd;
+	ooo_cmdsn->batch_count		= (batch) ?
+					  (cmdsn - sess->exp_cmd_sn) : 1;
+	ooo_cmdsn->cid			= cmd->conn->cid;
+	ooo_cmdsn->exp_cmdsn		= sess->exp_cmd_sn;
+	ooo_cmdsn->cmdsn		= cmdsn;
+
+	if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
+		kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+		return CMDSN_ERROR_CANNOT_RECOVER;
+	}
+
+	return CMDSN_HIGHER_THAN_EXP;
+}
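
As a rough illustration of the batch accounting in iscsit_handle_ooo_cmdsn(): when the newly received CmdSN does not immediately follow the tail of the out-of-order list, batch_count records how many CmdSNs are still missing between the session's ExpCmdSN and the received CmdSN. The toy values below are made up for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t exp_cmd_sn = 10;	/* session ExpCmdSN */
	uint32_t tail_cmdsn = 0;	/* CmdSN at the list tail, 0 = empty list */
	uint32_t cmdsn = 13;		/* newly received out-of-order CmdSN */
	int batch = (!tail_cmdsn || tail_cmdsn != cmdsn - 1);
	uint32_t batch_count = batch ? cmdsn - exp_cmd_sn : 1;

	/* Prints: batch=1 batch_count=3 (CmdSNs 10..12 are missing) */
	printf("batch=%d batch_count=%u (CmdSNs %u..%u are missing)\n",
	       batch, batch_count, exp_cmd_sn, cmdsn - 1);
	return 0;
}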
+
+static int iscsit_set_dataout_timeout_values(
+	struct iscsi_cmd *cmd,
+	u32 *offset,
+	u32 *length)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_r2t *r2t;
+
+	if (cmd->unsolicited_data) {
+		*offset = 0;
+		*length = (conn->sess->sess_ops->FirstBurstLength >
+			   cmd->data_length) ?
+			   cmd->data_length :
+			   conn->sess->sess_ops->FirstBurstLength;
+		return 0;
+	}
+
+	spin_lock_bh(&cmd->r2t_lock);
+	if (list_empty(&cmd->cmd_r2t_list)) {
+		pr_err("cmd->cmd_r2t_list is empty!\n");
+		spin_unlock_bh(&cmd->r2t_lock);
+		return -1;
+	}
+
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
+			*offset = r2t->offset;
+			*length = r2t->xfer_len;
+			spin_unlock_bh(&cmd->r2t_lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate any incomplete DataOUT"
+		" sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
+
+	return -1;
+}
+
+/*
+ *	NOTE: Called from interrupt (timer) context.
+ */
+static void iscsit_handle_dataout_timeout(unsigned long data)
+{
+	u32 pdu_length = 0, pdu_offset = 0;
+	u32 r2t_length = 0, r2t_offset = 0;
+	struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_session *sess = NULL;
+	struct iscsi_node_attrib *na;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+	sess = conn->sess;
+	na = iscsit_tpg_get_node_attrib(sess);
+
+	if (!sess->sess_ops->ErrorRecoveryLevel) {
+		pr_debug("Unable to recover from DataOut timeout while"
+			" in ERL=0.\n");
+		goto failure;
+	}
+
+	if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
+		pr_debug("Command ITT: 0x%08x exceeded max retries"
+			" for DataOUT timeout %u, closing iSCSI connection.\n",
+			cmd->init_task_tag, na->dataout_timeout_retries);
+		goto failure;
+	}
+
+	cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			pdu_offset = cmd->write_data_done;
+			if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
+			     cmd->next_burst_len)) > cmd->data_length)
+				pdu_length = (cmd->data_length -
+					cmd->write_data_done);
+			else
+				pdu_length = (conn->sess->sess_ops->MaxBurstLength -
+						cmd->next_burst_len);
+		} else {
+			pdu_offset = cmd->seq_start_offset;
+			pdu_length = (cmd->seq_end_offset -
+				cmd->seq_start_offset);
+		}
+	} else {
+		if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
+				&pdu_length) < 0)
+			goto failure;
+	}
+
+	if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+			&r2t_offset, &r2t_length) < 0)
+		goto failure;
+
+	pr_debug("Command ITT: 0x%08x timed out waiting for"
+		" completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
+		cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
+		"", r2t_offset, r2t_length);
+
+	if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
+		goto failure;
+
+	iscsit_start_dataout_timer(cmd, conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+	iscsit_dec_conn_usage_count(conn);
+
+	return;
+
+failure:
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+	iscsit_cause_connection_reinstatement(conn, 0);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		return;
+	}
+
+	mod_timer(&cmd->dataout_timer,
+		(get_jiffies_64() + na->dataout_timeout * HZ));
+	pr_debug("Updated DataOUT timer for ITT: 0x%08x",
+			cmd->init_task_tag);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+
+/*
+ *	Called with cmd->dataout_timeout_lock held.
+ */
+void iscsit_start_dataout_timer(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
+		" CID: %hu.\n", cmd->init_task_tag, conn->cid);
+
+	init_timer(&cmd->dataout_timer);
+	cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
+	cmd->dataout_timer.data = (unsigned long)cmd;
+	cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
+	cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
+	cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&cmd->dataout_timer);
+}
+
+void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+{
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&cmd->dataout_timeout_lock);
+		return;
+	}
+	cmd->dataout_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+	del_timer_sync(&cmd->dataout_timer);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+	pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
+			cmd->init_task_tag);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 0000000..85e67e2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
+#ifndef ISCSI_TARGET_ERL1_H
+#define ISCSI_TARGET_ERL1_H
+
+extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+			struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+			struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+			u32, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
+			u32, u32);
+extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 0000000..1af1f21
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,439 @@
+/*******************************************************************************
+ * This file contains error recovery level two functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+/*
+ *	FIXME: Does RData SNACK apply here as well?
+ */
+void iscsit_create_conn_recovery_datain_values(
+	struct iscsi_cmd *cmd,
+	u32 exp_data_sn)
+{
+	u32 data_sn = 0;
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->next_burst_len = 0;
+	cmd->read_data_done = 0;
+
+	while (exp_data_sn > data_sn) {
+		if ((cmd->next_burst_len +
+		     conn->conn_ops->MaxRecvDataSegmentLength) <
+		     conn->sess->sess_ops->MaxBurstLength) {
+			cmd->read_data_done +=
+			       conn->conn_ops->MaxRecvDataSegmentLength;
+			cmd->next_burst_len +=
+			       conn->conn_ops->MaxRecvDataSegmentLength;
+		} else {
+			cmd->read_data_done +=
+				(conn->sess->sess_ops->MaxBurstLength -
+				cmd->next_burst_len);
+			cmd->next_burst_len = 0;
+		}
+		data_sn++;
+	}
+}
+
+void iscsit_create_conn_recovery_dataout_values(
+	struct iscsi_cmd *cmd)
+{
+	u32 write_data_done = 0;
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->data_sn = 0;
+	cmd->next_burst_len = 0;
+
+	while (cmd->write_data_done > write_data_done) {
+		if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
+		     cmd->write_data_done)
+			write_data_done += conn->sess->sess_ops->MaxBurstLength;
+		else
+			break;
+	}
+
+	cmd->write_data_done = write_data_done;
+}
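
The loop above effectively rounds write_data_done down to a whole number of MaxBurstLength bursts, so DataOUT recovery restarts on a burst boundary. An equivalent stand-alone computation, with example values that are not taken from this patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t max_burst = 65536;	/* example MaxBurstLength */
	uint32_t write_data_done = 200000;	/* example progress before the failure */
	uint32_t rounded = (write_data_done / max_burst) * max_burst;

	/* Prints: write_data_done 200000 -> 196608 after connection recovery */
	printf("write_data_done %u -> %u after connection recovery\n",
	       write_data_done, rounded);
	return 0;
}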
+
+static int iscsit_attach_active_connection_recovery_entry(
+	struct iscsi_session *sess,
+	struct iscsi_conn_recovery *cr)
+{
+	spin_lock(&sess->cr_a_lock);
+	list_add_tail(&cr->cr_list, &sess->cr_active_list);
+	spin_unlock(&sess->cr_a_lock);
+
+	return 0;
+}
+
+static int iscsit_attach_inactive_connection_recovery_entry(
+	struct iscsi_session *sess,
+	struct iscsi_conn_recovery *cr)
+{
+	spin_lock(&sess->cr_i_lock);
+	list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
+
+	sess->conn_recovery_count++;
+	pr_debug("Incremented connection recovery count to %u for"
+		" SID: %u\n", sess->conn_recovery_count, sess->sid);
+	spin_unlock(&sess->cr_i_lock);
+
+	return 0;
+}
+
+struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+	struct iscsi_session *sess,
+	u16 cid)
+{
+	struct iscsi_conn_recovery *cr;
+
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+		if (cr->cid == cid) {
+			spin_unlock(&sess->cr_i_lock);
+			return cr;
+		}
+	}
+	spin_unlock(&sess->cr_i_lock);
+
+	return NULL;
+}
+
+void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+{
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_conn_recovery *cr, *cr_tmp;
+
+	spin_lock(&sess->cr_a_lock);
+	list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
+		list_del(&cr->cr_list);
+		spin_unlock(&sess->cr_a_lock);
+
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry_safe(cmd, cmd_tmp,
+				&cr->conn_recovery_cmd_list, i_list) {
+
+			list_del(&cmd->i_list);
+			cmd->conn = NULL;
+			spin_unlock(&cr->conn_recovery_cmd_lock);
+			iscsit_free_cmd(cmd);
+			spin_lock(&cr->conn_recovery_cmd_lock);
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		spin_lock(&sess->cr_a_lock);
+
+		kfree(cr);
+	}
+	spin_unlock(&sess->cr_a_lock);
+
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
+		list_del(&cr->cr_list);
+		spin_unlock(&sess->cr_i_lock);
+
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry_safe(cmd, cmd_tmp,
+				&cr->conn_recovery_cmd_list, i_list) {
+
+			list_del(&cmd->i_list);
+			cmd->conn = NULL;
+			spin_unlock(&cr->conn_recovery_cmd_lock);
+			iscsit_free_cmd(cmd);
+			spin_lock(&cr->conn_recovery_cmd_lock);
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		spin_lock(&sess->cr_i_lock);
+
+		kfree(cr);
+	}
+	spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	spin_lock(&sess->cr_a_lock);
+	list_del(&cr->cr_list);
+
+	sess->conn_recovery_count--;
+	pr_debug("Decremented connection recovery count to %u for"
+		" SID: %u\n", sess->conn_recovery_count, sess->sid);
+	spin_unlock(&sess->cr_a_lock);
+
+	kfree(cr);
+
+	return 0;
+}
+
+int iscsit_remove_inactive_connection_recovery_entry(
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	spin_lock(&sess->cr_i_lock);
+	list_del(&cr->cr_list);
+	spin_unlock(&sess->cr_i_lock);
+
+	return 0;
+}
+
+/*
+ *	Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+	struct iscsi_cmd *cmd,
+	struct iscsi_session *sess)
+{
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		BUG();
+	}
+	cr = cmd->cr;
+
+	list_del(&cmd->i_list);
+	return --cr->cmd_count;
+}
+
+void iscsit_discard_cr_cmds_by_expstatsn(
+	struct iscsi_conn_recovery *cr,
+	u32 exp_statsn)
+{
+	u32 dropped_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_session *sess = cr->sess;
+
+	spin_lock(&cr->conn_recovery_cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp,
+			&cr->conn_recovery_cmd_list, i_list) {
+
+		if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+		     (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+		     (cmd->stat_sn >= exp_statsn)) {
+			continue;
+		}
+
+		dropped_count++;
+		pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+			" 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+				cmd->stat_sn, cr->cid);
+
+		iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+		iscsit_free_cmd(cmd);
+		spin_lock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&cr->conn_recovery_cmd_lock);
+
+	pr_debug("Dropped %u total acknowledged commands on"
+		" CID: %hu less than old ExpStatSN: 0x%08x\n",
+			dropped_count, cr->cid, exp_statsn);
+
+	if (!cr->cmd_count) {
+		pr_debug("No commands to be reassigned for failed"
+			" connection CID: %hu on SID: %u\n",
+			cr->cid, sess->sid);
+		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+		iscsit_attach_active_connection_recovery_entry(sess, cr);
+		pr_debug("iSCSI connection recovery successful for CID:"
+			" %hu on SID: %u\n", cr->cid, sess->sid);
+		iscsit_remove_active_connection_recovery_entry(cr, sess);
+	} else {
+		iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+		iscsit_attach_active_connection_recovery_entry(sess, cr);
+	}
+}
+
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+	u32 dropped_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+	struct iscsi_session *sess = conn->sess;
+
+	mutex_lock(&sess->cmdsn_mutex);
+	list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+			&sess->sess_ooo_cmdsn_list, ooo_list) {
+
+		if (ooo_cmdsn->cid != conn->cid)
+			continue;
+
+		dropped_count++;
+		pr_debug("Dropping unacknowledged CmdSN:"
+		" 0x%08x during connection recovery on CID: %hu\n",
+			ooo_cmdsn->cmdsn, conn->cid);
+		iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+	}
+	mutex_unlock(&sess->cmdsn_mutex);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+		if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+			continue;
+
+		list_del(&cmd->i_list);
+
+		spin_unlock_bh(&conn->cmd_lock);
+		iscsit_free_cmd(cmd);
+		spin_lock_bh(&conn->cmd_lock);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_debug("Dropped %u total unacknowledged commands on CID:"
+		" %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
+				sess->exp_cmd_sn);
+	return 0;
+}
+
+int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+{
+	u32 cmd_count = 0;
+	struct iscsi_cmd *cmd, *cmd_tmp;
+	struct iscsi_conn_recovery *cr;
+
+	/*
+	 * Allocate a struct iscsi_conn_recovery for this connection.
+	 * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
+	 * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+	 * connection's command list for connection recovery.
+	 */
+	cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
+	if (!cr) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_conn_recovery.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&cr->cr_list);
+	INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
+	spin_lock_init(&cr->conn_recovery_cmd_lock);
+	/*
+	 * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+	 * ISCSI_OP_NOOP_OUT opcodes.  For all other opcodes call
+	 * list_del(&cmd->i_list); to release the command to the
+	 * session pool and remove it from the connection's list.
+	 *
+	 * Also stop the DataOUT timer, which will be restarted after
+	 * sending the TMR response.
+	 */
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+
+		if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
+		    (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
+			pr_debug("Not performing realligence on"
+				" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
+				" CID: %hu\n", cmd->iscsi_opcode,
+				cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+			list_del(&cmd->i_list);
+			spin_unlock_bh(&conn->cmd_lock);
+			iscsit_free_cmd(cmd);
+			spin_lock_bh(&conn->cmd_lock);
+			continue;
+		}
+
+		/*
+		 * Special case where commands greater than or equal to
+		 * the session's ExpCmdSN are attached to the connection
+		 * list but not to the out of order CmdSN list.  The one
+		 * obvious case is when a command with immediate data
+		 * attached must only check the CmdSN against ExpCmdSN
+		 * after the data is received.  The special case below
+		 * is when the connection fails before data is received,
+		 * but also may apply to other PDUs, so it has been
+		 * made generic here.
+		 */
+		if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+		     (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+			list_del(&cmd->i_list);
+			spin_unlock_bh(&conn->cmd_lock);
+			iscsit_free_cmd(cmd);
+			spin_lock_bh(&conn->cmd_lock);
+			continue;
+		}
+
+		cmd_count++;
+		pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
+			" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
+			" realligence.\n", cmd->iscsi_opcode,
+			cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
+			conn->cid);
+
+		cmd->deferred_i_state = cmd->i_state;
+		cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
+
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+
+		cmd->sess = conn->sess;
+
+		list_del(&cmd->i_list);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_free_all_datain_reqs(cmd);
+
+		transport_wait_for_tasks(&cmd->se_cmd);
+		/*
+		 * Add the struct iscsi_cmd to the connection recovery cmd list
+		 */
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+
+		spin_lock_bh(&conn->cmd_lock);
+		cmd->cr = cr;
+		cmd->conn = NULL;
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+	/*
+	 * Fill in the various values in the preallocated struct iscsi_conn_recovery.
+	 */
+	cr->cid = conn->cid;
+	cr->cmd_count = cmd_count;
+	cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+	cr->sess = conn->sess;
+
+	iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
+
+	return 0;
+}
+
+int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+{
+	atomic_set(&conn->connection_recovery, 1);
+
+	if (iscsit_close_connection(conn) < 0)
+		return -1;
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 0000000..22f8d24
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
+#ifndef ISCSI_TARGET_ERL2_H
+#define ISCSI_TARGET_ERL2_H
+
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+			struct iscsi_session *, u16);
+extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern int iscsit_remove_active_connection_recovery_entry(
+			struct iscsi_conn_recovery *, struct iscsi_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
+			struct iscsi_session *);
+extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 0000000..bc7d08f
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1205 @@
+/*******************************************************************************
+ * This file contains the login functions used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/idr.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+static int iscsi_login_init_conn(struct iscsi_conn *conn)
+{
+	init_waitqueue_head(&conn->queues_wq);
+	INIT_LIST_HEAD(&conn->conn_list);
+	INIT_LIST_HEAD(&conn->conn_cmd_list);
+	INIT_LIST_HEAD(&conn->immed_queue_list);
+	INIT_LIST_HEAD(&conn->response_queue_list);
+	init_completion(&conn->conn_post_wait_comp);
+	init_completion(&conn->conn_wait_comp);
+	init_completion(&conn->conn_wait_rcfr_comp);
+	init_completion(&conn->conn_waiting_on_uc_comp);
+	init_completion(&conn->conn_logout_comp);
+	init_completion(&conn->rx_half_close_comp);
+	init_completion(&conn->tx_half_close_comp);
+	spin_lock_init(&conn->cmd_lock);
+	spin_lock_init(&conn->conn_usage_lock);
+	spin_lock_init(&conn->immed_queue_lock);
+	spin_lock_init(&conn->nopin_timer_lock);
+	spin_lock_init(&conn->response_queue_lock);
+	spin_lock_init(&conn->state_lock);
+
+	if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+		pr_err("Unable to allocate conn->conn_cpumask\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
+ * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
+ */
+int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+{
+	/*
+	 * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+	 * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fall back
+	 * to software 1x8 byte slicing from crc32c.ko
+	 */
+	conn->conn_rx_hash.flags = 0;
+	conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(conn->conn_rx_hash.tfm)) {
+		pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+		return -ENOMEM;
+	}
+
+	conn->conn_tx_hash.flags = 0;
+	conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+						CRYPTO_ALG_ASYNC);
+	if (IS_ERR(conn->conn_tx_hash.tfm)) {
+		pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
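
The conn_rx_hash/conn_tx_hash transforms allocated above are used elsewhere in the driver to compute header and data digests through the legacy (linux-3.4 era) crypto_hash interface. A minimal sketch of how a CRC32C digest can be computed with a transform obtained this way is shown below; crc32c_of_buffer() is a hypothetical helper written only for illustration and is not part of this patch:

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: CRC32C of a linear buffer using a transform from
 * crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC), via the pre-4.x
 * hash_desc interface available in linux-3.4. */
static int crc32c_of_buffer(struct crypto_hash *tfm, const void *buf,
			    unsigned int len, u32 *crc)
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (crypto_hash_init(&desc))
		return -1;
	if (crypto_hash_update(&desc, &sg, len))
		return -1;
	return crypto_hash_final(&desc, (u8 *)crc);
}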
+
+static int iscsi_login_check_initiator_version(
+	struct iscsi_conn *conn,
+	u8 version_max,
+	u8 version_min)
+{
+	if ((version_max != 0x00) || (version_min != 0x00)) {
+		pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+			" version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+			version_min, version_max);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_NO_VERSION);
+		return -1;
+	}
+
+	return 0;
+}
+
+int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+{
+	int sessiontype;
+	struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_session *sess = NULL, *sess_p = NULL;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+
+	initiatorname_param = iscsi_find_param_from_key(
+			INITIATORNAME, conn->param_list);
+	sessiontype_param = iscsi_find_param_from_key(
+			SESSIONTYPE, conn->param_list);
+	if (!initiatorname_param || !sessiontype_param) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		return -1;
+	}
+
+	sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+
+		sess_p = se_sess->fabric_sess_ptr;
+		spin_lock(&sess_p->conn_lock);
+		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+		    atomic_read(&sess_p->session_logout) ||
+		    (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+			spin_unlock(&sess_p->conn_lock);
+			continue;
+		}
+		if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+		   (!strcmp(sess_p->sess_ops->InitiatorName,
+			    initiatorname_param->value) &&
+		   (sess_p->sess_ops->SessionType == sessiontype))) {
+			atomic_set(&sess_p->session_reinstatement, 1);
+			spin_unlock(&sess_p->conn_lock);
+			iscsit_inc_session_usage_count(sess_p);
+			iscsit_stop_time2retain_timer(sess_p);
+			sess = sess_p;
+			break;
+		}
+		spin_unlock(&sess_p->conn_lock);
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+	/*
+	 * If the Time2Retain handler has expired, the session is already gone.
+	 */
+	if (!sess)
+		return 0;
+
+	pr_debug("%s iSCSI Session SID %u is still active for %s,"
+		" performing session reinstatement.\n", (sessiontype) ?
+		"Discovery" : "Normal", sess->sid,
+		sess->sess_ops->InitiatorName);
+
+	spin_lock_bh(&sess->conn_lock);
+	if (sess->session_state == TARG_SESS_STATE_FAILED) {
+		spin_unlock_bh(&sess->conn_lock);
+		iscsit_dec_session_usage_count(sess);
+		target_put_session(sess->se_sess);
+		return 0;
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsit_stop_session(sess, 1, 1);
+	iscsit_dec_session_usage_count(sess);
+
+	target_put_session(sess->se_sess);
+	return 0;
+}
+
+static void iscsi_login_set_conn_values(
+	struct iscsi_session *sess,
+	struct iscsi_conn *conn,
+	u16 cid)
+{
+	conn->sess		= sess;
+	conn->cid		= cid;
+	/*
+	 * Generate a random Status sequence number (statsn) for the new
+	 * iSCSI connection.
+	 */
+	get_random_bytes(&conn->stat_sn, sizeof(u32));
+
+	mutex_lock(&auth_id_lock);
+	conn->auth_id		= iscsit_global->auth_id++;
+	mutex_unlock(&auth_id_lock);
+}
+
+/*
+ *	This is the leading connection of a new session,
+ *	or session reinstatement.
+ */
+static int iscsi_login_zero_tsih_s1(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = NULL;
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+	sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+	if (!sess) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		pr_err("Could not allocate memory for session\n");
+		return -ENOMEM;
+	}
+
+	iscsi_login_set_conn_values(sess, conn, pdu->cid);
+	sess->init_task_tag	= pdu->itt;
+	memcpy(&sess->isid, pdu->isid, 6);
+	sess->exp_cmd_sn	= pdu->cmdsn;
+	INIT_LIST_HEAD(&sess->sess_conn_list);
+	INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
+	INIT_LIST_HEAD(&sess->cr_active_list);
+	INIT_LIST_HEAD(&sess->cr_inactive_list);
+	init_completion(&sess->async_msg_comp);
+	init_completion(&sess->reinstatement_comp);
+	init_completion(&sess->session_wait_comp);
+	init_completion(&sess->session_waiting_on_uc_comp);
+	mutex_init(&sess->cmdsn_mutex);
+	spin_lock_init(&sess->conn_lock);
+	spin_lock_init(&sess->cr_a_lock);
+	spin_lock_init(&sess->cr_i_lock);
+	spin_lock_init(&sess->session_usage_lock);
+	spin_lock_init(&sess->ttt_lock);
+
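+	/*
+	 * Reserve a unique session_index from the global sess_idr; it is
+	 * released again via idr_remove() in the login failure path.
+	 */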
+	if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
+		pr_err("idr_pre_get() for sess_idr failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		kfree(sess);
+		return -ENOMEM;
+	}
+	spin_lock(&sess_idr_lock);
+	idr_get_new(&sess_idr, NULL, &sess->session_index);
+	spin_unlock(&sess_idr_lock);
+
+	sess->creation_time = get_jiffies_64();
+	spin_lock_init(&sess->session_stats_lock);
+	/*
+	 * The FFP CmdSN window values will be allocated from the TPG's
+	 * Initiator Node's ACL once the login has been successfully completed.
+	 */
+	sess->max_cmd_sn	= pdu->cmdsn;
+
+	sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
+	if (!sess->sess_ops) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_sess_ops.\n");
+		kfree(sess);
+		return -ENOMEM;
+	}
+
+	sess->se_sess = transport_init_session();
+	if (IS_ERR(sess->se_sess)) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		kfree(sess);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int iscsi_login_zero_tsih_s2(
+	struct iscsi_conn *conn)
+{
+	struct iscsi_node_attrib *na;
+	struct iscsi_session *sess = conn->sess;
+	unsigned char buf[32];
+
+	sess->tpg = conn->tpg;
+
+	/*
+	 * Assign a new TPG Session Handle.  Note this is protected with
+	 * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
+	 */
+	sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
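+	/*
+	 * A TSIH value of zero is reserved to indicate a new session, so
+	 * skip it if the per-TPG counter wraps around.
+	 */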
+	if (!sess->tsih)
+		sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+
+	/*
+	 * Create the default params from user-defined values.
+	 */
+	if (iscsi_copy_param_list(&conn->param_list,
+				ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	iscsi_set_keys_to_negotiate(0, conn->param_list);
+
+	if (sess->sess_ops->SessionType)
+		return iscsi_set_keys_irrelevant_for_discovery(
+				conn->param_list);
+
+	na = iscsit_tpg_get_node_attrib(sess);
+
+	/*
+	 * Need to send TargetPortalGroupTag back in first login response
+	 * on any iSCSI connection where the Initiator provides TargetName.
+	 * See 5.3.1.  Login Phase Start
+	 *
+	 * In our case, we have already located the struct iscsi_tiqn at this point.
+	 */
+	memset(buf, 0, 32);
+	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	/*
+	 * Workaround for Initiators that have broken connection recovery logic.
+	 *
+	 * "We would really like to get rid of this." Linux-iSCSI.org team
+	 */
+	memset(buf, 0, 32);
+	sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
+	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Remove PSTATE_NEGOTIATE for the four FIM related keys.
+ * The Initiator node will be able to enable FIM by proposing them itself.
+ */
+int iscsi_login_disable_FIM_keys(
+	struct iscsi_param_list *param_list,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_param *param;
+
+	param = iscsi_find_param_from_key("OFMarker", param_list);
+	if (!param) {
+		pr_err("iscsi_find_param_from_key() for"
+				" OFMarker failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+	param->state &= ~PSTATE_NEGOTIATE;
+
+	param = iscsi_find_param_from_key("OFMarkInt", param_list);
+	if (!param) {
+		pr_err("iscsi_find_param_from_key() for"
+				" OFMarkInt failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+	param->state &= ~PSTATE_NEGOTIATE;
+
+	param = iscsi_find_param_from_key("IFMarker", param_list);
+	if (!param) {
+		pr_err("iscsi_find_param_from_key() for"
+				" IFMarker failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+	param->state &= ~PSTATE_NEGOTIATE;
+
+	param = iscsi_find_param_from_key("IFMarkInt", param_list);
+	if (!param) {
+		pr_err("iscsi_find_param_from_key() for"
+				" IFMarkInt failed\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+	param->state &= ~PSTATE_NEGOTIATE;
+
+	return 0;
+}
+
+static int iscsi_login_non_zero_tsih_s1(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+	iscsi_login_set_conn_values(NULL, conn, pdu->cid);
+	return 0;
+}
+
+/*
+ *	Add a new connection to an existing session.
+ */
+static int iscsi_login_non_zero_tsih_s2(
+	struct iscsi_conn *conn,
+	unsigned char *buf)
+{
+	struct iscsi_portal_group *tpg = conn->tpg;
+	struct iscsi_session *sess = NULL, *sess_p = NULL;
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct se_session *se_sess, *se_sess_tmp;
+	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+			sess_list) {
+
+		sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+		if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+		    atomic_read(&sess_p->session_logout) ||
+		   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+			continue;
+		if (!memcmp(sess_p->isid, pdu->isid, 6) &&
+		     (sess_p->tsih == pdu->tsih)) {
+			iscsit_inc_session_usage_count(sess_p);
+			iscsit_stop_time2retain_timer(sess_p);
+			sess = sess_p;
+			break;
+		}
+	}
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * If the Time2Retain handler has expired, the session is already gone.
+	 */
+	if (!sess) {
+		pr_err("Initiator attempting to add a connection to"
+			" a non-existent session, rejecting iSCSI Login.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_NO_SESSION);
+		return -1;
+	}
+
+	/*
+	 * Stop the Time2Retain timer if this is a failed session, we restart
+	 * the timer if the login is not successful.
+	 */
+	spin_lock_bh(&sess->conn_lock);
+	if (sess->session_state == TARG_SESS_STATE_FAILED)
+		atomic_set(&sess->session_continuation, 1);
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsi_login_set_conn_values(sess, conn, pdu->cid);
+
+	if (iscsi_copy_param_list(&conn->param_list,
+			ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	iscsi_set_keys_to_negotiate(0, conn->param_list);
+	/*
+	 * Need to send TargetPortalGroupTag back in first login response
+	 * on any iSCSI connection where the Initiator provides TargetName.
+	 * See 5.3.1.  Login Phase Start
+	 *
+	 * In our case, we have already located the struct iscsi_tiqn at this point.
+	 */
+	memset(buf, 0, 32);
+	sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+	if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return -1;
+	}
+
+	return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+	struct iscsi_conn *conn,
+	u16 cid,
+	u32 exp_statsn)
+{
+	struct iscsi_conn *conn_ptr = NULL;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_session *sess = conn->sess;
+
+	/*
+	 * By following item 5 in the login table, if we have found
+	 * an existing ISID and a valid/existing TSIH and an existing
+	 * CID we do connection reinstatement.  Currently we do not
+	 * support it, so we send back a non-zero status class to the
+	 * initiator and release the new connection.
+	 */
+	conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+	if ((conn_ptr)) {
+		pr_err("Connection exists with CID %hu for %s,"
+			" performing connection reinstatement.\n",
+			conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+		iscsit_connection_reinstatement_rcfr(conn_ptr);
+		iscsit_dec_conn_usage_count(conn_ptr);
+	}
+
+	/*
+	 * Check for any connection recovery entries containing CID.
+	 * We use the original ExpStatSN sent in the first login request
+	 * to acknowledge commands for the failed connection.
+	 *
+	 * Also note that an explicit logout may have already been sent,
+	 * but the response may not be sent due to additional connection
+	 * loss.
+	 */
+	if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+		cr = iscsit_get_inactive_connection_recovery_entry(
+				sess, cid);
+		if ((cr)) {
+			pr_debug("Performing implicit logout"
+				" for connection recovery on CID: %hu\n",
+					conn->cid);
+			iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+		}
+	}
+
+	/*
+	 * Else we follow item 4 from the login table in that we have
+	 * found an existing ISID and a valid/existing TSIH and a new
+	 * CID we go ahead and continue to add a new connection to the
+	 * session.
+	 */
+	pr_debug("Adding CID %hu to existing session for %s.\n",
+			cid, sess->sess_ops->InitiatorName);
+
+	if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+		pr_err("Adding additional connection to this session"
+			" would exceed MaxConnections %d, login failed.\n",
+				sess->sess_ops->MaxConnections);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_ISID_ERROR);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+
+	if (!sess->sess_ops->SessionType)
+		iscsit_start_nopin_timer(conn);
+}
+
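+/*
+ * Complete login processing: for zero_tsih the leading connection of a new
+ * session is registered with the generic target core, otherwise the new
+ * connection is simply added to the already logged in session.
+ */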
+static int iscsi_post_login_handler(
+	struct iscsi_np *np,
+	struct iscsi_conn *conn,
+	u8 zero_tsih)
+{
+	int stop_timer = 0;
+	struct iscsi_session *sess = conn->sess;
+	struct se_session *se_sess = sess->se_sess;
+	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+	struct iscsi_thread_set *ts;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+			ISCSI_LOGIN_STATUS_ACCEPT);
+
+	pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+	conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+	iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+	iscsit_set_sync_and_steering_values(conn);
+	/*
+	 * SCSI Initiator -> SCSI Target Port Mapping
+	 */
+	ts = iscsi_get_thread_set();
+	if (!zero_tsih) {
+		iscsi_set_session_parameters(sess->sess_ops,
+				conn->param_list, 0);
+		iscsi_release_param_list(conn->param_list);
+		conn->param_list = NULL;
+
+		spin_lock_bh(&sess->conn_lock);
+		atomic_set(&sess->session_continuation, 0);
+		if (sess->session_state == TARG_SESS_STATE_FAILED) {
+			pr_debug("Moving to"
+					" TARG_SESS_STATE_LOGGED_IN.\n");
+			sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+			stop_timer = 1;
+		}
+
+		pr_debug("iSCSI Login successful on CID: %hu from %s to"
+			" %s:%hu,%hu\n", conn->cid, conn->login_ip,
+			conn->local_ip, conn->local_port, tpg->tpgt);
+
+		list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+		atomic_inc(&sess->nconn);
+		pr_debug("Incremented iSCSI Connection count to %hu"
+			" from node: %s\n", atomic_read(&sess->nconn),
+			sess->sess_ops->InitiatorName);
+		spin_unlock_bh(&sess->conn_lock);
+
+		iscsi_post_login_start_timers(conn);
+		iscsi_activate_thread_set(conn, ts);
+		/*
+		 * Determine CPU mask to ensure connection's RX and TX kthreads
+		 * are scheduled on the same CPU.
+		 */
+		iscsit_thread_get_cpumask(conn);
+		conn->conn_rx_reset_cpumask = 1;
+		conn->conn_tx_reset_cpumask = 1;
+
+		iscsit_dec_conn_usage_count(conn);
+		if (stop_timer) {
+			spin_lock_bh(&se_tpg->session_lock);
+			iscsit_stop_time2retain_timer(sess);
+			spin_unlock_bh(&se_tpg->session_lock);
+		}
+		iscsit_dec_session_usage_count(sess);
+		return 0;
+	}
+
+	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+	iscsi_release_param_list(conn->param_list);
+	conn->param_list = NULL;
+
+	iscsit_determine_maxcmdsn(sess);
+
+	spin_lock_bh(&se_tpg->session_lock);
+	__transport_register_session(&sess->tpg->tpg_se_tpg,
+			se_sess->se_node_acl, se_sess, sess);
+	pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
+	sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+	pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
+		conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+		tpg->tpgt);
+
+	spin_lock_bh(&sess->conn_lock);
+	list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+	atomic_inc(&sess->nconn);
+	pr_debug("Incremented iSCSI Connection count to %hu from node:"
+		" %s\n", atomic_read(&sess->nconn),
+		sess->sess_ops->InitiatorName);
+	spin_unlock_bh(&sess->conn_lock);
+
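+	/*
+	 * Assign a non-zero Session ID from the per-TPG counter, skipping
+	 * zero if the counter wraps around.
+	 */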
+	sess->sid = tpg->sid++;
+	if (!sess->sid)
+		sess->sid = tpg->sid++;
+	pr_debug("Established iSCSI session from node: %s\n",
+			sess->sess_ops->InitiatorName);
+
+	tpg->nsessions++;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_nsessions++;
+
+	pr_debug("Incremented number of active iSCSI sessions to %u on"
+		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	iscsi_post_login_start_timers(conn);
+	iscsi_activate_thread_set(conn, ts);
+	/*
+	 * Determine CPU mask to ensure connection's RX and TX kthreads
+	 * are scheduled on the same CPU.
+	 */
+	iscsit_thread_get_cpumask(conn);
+	conn->conn_rx_reset_cpumask = 1;
+	conn->conn_tx_reset_cpumask = 1;
+
+	iscsit_dec_conn_usage_count(conn);
+
+	return 0;
+}
+
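+/*
+ * Login timer expiry handler: signal the network portal's login thread
+ * with SIGINT so a stalled login attempt is interrupted.
+ */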
+static void iscsi_handle_login_thread_timeout(unsigned long data)
+{
+	struct iscsi_np *np = (struct iscsi_np *) data;
+
+	spin_lock_bh(&np->np_thread_lock);
+	pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
+			np->np_ip, np->np_port);
+
+	if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return;
+	}
+
+	if (np->np_thread)
+		send_sig(SIGINT, np->np_thread, 1);
+
+	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_start_login_thread_timer(struct iscsi_np *np)
+{
+	/*
+	 * This uses the TA_LOGIN_TIMEOUT constant because at this point
+	 * we do not yet have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout.
+	 */
+	spin_lock_bh(&np->np_thread_lock);
+	init_timer(&np->np_login_timer);
+	np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+	np->np_login_timer.data = (unsigned long)np;
+	np->np_login_timer.function = iscsi_handle_login_thread_timeout;
+	np->np_login_timer_flags &= ~ISCSI_TF_STOP;
+	np->np_login_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&np->np_login_timer);
+
+	pr_debug("Added timeout timer to iSCSI login request for"
+			" %u seconds.\n", TA_LOGIN_TIMEOUT);
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
+{
+	spin_lock_bh(&np->np_thread_lock);
+	if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&np->np_thread_lock);
+		return;
+	}
+	np->np_login_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&np->np_thread_lock);
+
+	del_timer_sync(&np->np_login_timer);
+
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&np->np_thread_lock);
+}
+
+int iscsi_target_setup_login_socket(
+	struct iscsi_np *np,
+	struct __kernel_sockaddr_storage *sockaddr)
+{
+	struct socket *sock;
+	int backlog = 5, ret, opt = 0, len;
+
+	switch (np->np_network_transport) {
+	case ISCSI_TCP:
+		np->np_ip_proto = IPPROTO_TCP;
+		np->np_sock_type = SOCK_STREAM;
+		break;
+	case ISCSI_SCTP_TCP:
+		np->np_ip_proto = IPPROTO_SCTP;
+		np->np_sock_type = SOCK_STREAM;
+		break;
+	case ISCSI_SCTP_UDP:
+		np->np_ip_proto = IPPROTO_SCTP;
+		np->np_sock_type = SOCK_SEQPACKET;
+		break;
+	case ISCSI_IWARP_TCP:
+	case ISCSI_IWARP_SCTP:
+	case ISCSI_INFINIBAND:
+	default:
+		pr_err("Unsupported network_transport: %d\n",
+				np->np_network_transport);
+		return -EINVAL;
+	}
+
+	ret = sock_create(sockaddr->ss_family, np->np_sock_type,
+			np->np_ip_proto, &sock);
+	if (ret < 0) {
+		pr_err("sock_create() failed.\n");
+		return ret;
+	}
+	np->np_socket = sock;
+	/*
+	 * Set up np->np_sockaddr from the sockaddr passed in from the
+	 * iscsi_target_configfs.c code.
+	 */
+	memcpy(&np->np_sockaddr, sockaddr,
+			sizeof(struct __kernel_sockaddr_storage));
+
+	if (sockaddr->ss_family == AF_INET6)
+		len = sizeof(struct sockaddr_in6);
+	else
+		len = sizeof(struct sockaddr_in);
+	/*
+	 * Set SO_REUSEADDR, and disable Nagle's algorithm with TCP_NODELAY.
+	 */
+	/* FIXME: Someone please explain why this is endian-safe */
+	opt = 1;
+	if (np->np_network_transport == ISCSI_TCP) {
+		ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+				(char *)&opt, sizeof(opt));
+		if (ret < 0) {
+			pr_err("kernel_setsockopt() for TCP_NODELAY"
+				" failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	/* FIXME: Someone please explain why this is endian-safe */
+	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+			(char *)&opt, sizeof(opt));
+	if (ret < 0) {
+		pr_err("kernel_setsockopt() for SO_REUSEADDR"
+			" failed\n");
+		goto fail;
+	}
+
+	ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
+			(char *)&opt, sizeof(opt));
+	if (ret < 0) {
+		pr_err("kernel_setsockopt() for IP_FREEBIND"
+			" failed\n");
+		goto fail;
+	}
+
+	ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+	if (ret < 0) {
+		pr_err("kernel_bind() failed: %d\n", ret);
+		goto fail;
+	}
+
+	ret = kernel_listen(sock, backlog);
+	if (ret != 0) {
+		pr_err("kernel_listen() failed: %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	np->np_socket = NULL;
+	if (sock)
+		sock_release(sock);
+	return ret;
+}
+
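+/*
+ * Accept one connection on the network portal and drive its login phase.
+ * Returns 1 when the caller should loop and accept another socket, or 0
+ * when the login thread is shutting down.
+ */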
+static int __iscsi_target_login_thread(struct iscsi_np *np)
+{
+	u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+	int err, ret = 0, stop;
+	struct iscsi_conn *conn = NULL;
+	struct iscsi_login *login;
+	struct iscsi_portal_group *tpg = NULL;
+	struct socket *new_sock, *sock;
+	struct kvec iov;
+	struct iscsi_login_req *pdu;
+	struct sockaddr_in sock_in;
+	struct sockaddr_in6 sock_in6;
+
+	flush_signals(current);
+	sock = np->np_socket;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		complete(&np->np_restart_comp);
+	} else {
+		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	if (kernel_accept(sock, &new_sock, 0) < 0) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+			spin_unlock_bh(&np->np_thread_lock);
+			complete(&np->np_restart_comp);
+			/* Get another socket */
+			return 1;
+		}
+		spin_unlock_bh(&np->np_thread_lock);
+		goto out;
+	}
+	iscsi_start_login_thread_timer(np);
+
+	conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+	if (!conn) {
+		pr_err("Could not allocate memory for"
+			" new connection\n");
+		sock_release(new_sock);
+		/* Get another socket */
+		return 1;
+	}
+
+	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+	conn->conn_state = TARG_CONN_STATE_FREE;
+	conn->sock = new_sock;
+
+	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+	conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+	/*
+	 * Allocate conn->conn_ops early as a failure calling
+	 * iscsit_tx_login_rsp() below will call tx_data().
+	 */
+	conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+	if (!conn->conn_ops) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_conn_ops.\n");
+		goto new_sess_out;
+	}
+	/*
+	 * Perform the remaining iSCSI connection initialization items.
+	 */
+	if (iscsi_login_init_conn(conn) < 0)
+		goto new_sess_out;
+
+	memset(buffer, 0, ISCSI_HDR_LEN);
+	memset(&iov, 0, sizeof(struct kvec));
+	iov.iov_base	= buffer;
+	iov.iov_len	= ISCSI_HDR_LEN;
+
+	if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
+		pr_err("rx_data() returned an error.\n");
+		goto new_sess_out;
+	}
+
+	iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
+	if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
+		pr_err("First opcode is not login request,"
+			" failing login request.\n");
+		goto new_sess_out;
+	}
+
+	pdu			= (struct iscsi_login_req *) buffer;
+	pdu->cid		= be16_to_cpu(pdu->cid);
+	pdu->tsih		= be16_to_cpu(pdu->tsih);
+	pdu->itt		= be32_to_cpu(pdu->itt);
+	pdu->cmdsn		= be32_to_cpu(pdu->cmdsn);
+	pdu->exp_statsn		= be32_to_cpu(pdu->exp_statsn);
+	/*
+	 * Used by iscsit_tx_login_rsp() for Login Response PDUs
+	 * when Status-Class != 0.
+	 */
+	conn->login_itt		= pdu->itt;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+		spin_unlock_bh(&np->np_thread_lock);
+		pr_err("iSCSI Network Portal on %s:%hu currently not"
+			" active.\n", np->np_ip, np->np_port);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		goto new_sess_out;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+		if (conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 1) < 0) {
+			pr_err("sock_ops->getname() failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			goto new_sess_out;
+		}
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+				&sock_in6.sin6_addr.in6_u);
+		conn->login_port = ntohs(sock_in6.sin6_port);
+
+		if (conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 0) < 0) {
+			pr_err("sock_ops->getname() failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			goto new_sess_out;
+		}
+		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+				&sock_in6.sin6_addr.in6_u);
+		conn->local_port = ntohs(sock_in6.sin6_port);
+
+	} else {
+		memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+		if (conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 1) < 0) {
+			pr_err("sock_ops->getname() failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			goto new_sess_out;
+		}
+		sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
+		conn->login_port = ntohs(sock_in.sin_port);
+
+		if (conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 0) < 0) {
+			pr_err("sock_ops->getname() failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			goto new_sess_out;
+		}
+		sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
+		conn->local_port = ntohs(sock_in.sin_port);
+	}
+
+	conn->network_transport = np->np_network_transport;
+
+	pr_debug("Received iSCSI login request from %s on %s Network"
+			" Portal %s:%hu\n", conn->login_ip,
+		(conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
+			conn->local_ip, conn->local_port);
+
+	pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+	conn->conn_state	= TARG_CONN_STATE_IN_LOGIN;
+
+	if (iscsi_login_check_initiator_version(conn, pdu->max_version,
+			pdu->min_version) < 0)
+		goto new_sess_out;
+
+	zero_tsih = (pdu->tsih == 0x0000);
+	if ((zero_tsih)) {
+		/*
+		 * This is the leading connection of a new session.
+		 * We wait until after authentication to check for
+		 * session reinstatement.
+		 */
+		if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
+			goto new_sess_out;
+	} else {
+		/*
+		 * Add a new connection to an existing session.
+		 * We check for a non-existent session in
+		 * iscsi_login_non_zero_tsih_s2() below based
+		 * on ISID/TSIH, but wait until after authentication
+		 * to check for connection reinstatement, etc.
+		 */
+		if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
+			goto new_sess_out;
+	}
+
+	/*
+	 * This will process the first login request, and call
+	 * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+	 */
+	login = iscsi_target_init_negotiation(np, conn, buffer);
+	if (!login) {
+		tpg = conn->tpg;
+		goto new_sess_out;
+	}
+
+	tpg = conn->tpg;
+	if (!tpg) {
+		pr_err("Unable to locate struct iscsi_conn->tpg\n");
+		goto new_sess_out;
+	}
+
+	if (zero_tsih) {
+		if (iscsi_login_zero_tsih_s2(conn) < 0) {
+			iscsi_target_nego_release(login, conn);
+			goto new_sess_out;
+		}
+	} else {
+		if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
+			iscsi_target_nego_release(login, conn);
+			goto old_sess_out;
+		}
+	}
+
+	if (iscsi_target_start_negotiation(login, conn) < 0)
+		goto new_sess_out;
+
+	if (!conn->sess) {
+		pr_err("struct iscsi_conn session pointer is NULL!\n");
+		goto new_sess_out;
+	}
+
+	iscsi_stop_login_thread_timer(np);
+
+	if (signal_pending(current))
+		goto new_sess_out;
+
+	ret = iscsi_post_login_handler(np, conn, zero_tsih);
+
+	if (ret < 0)
+		goto new_sess_out;
+
+	iscsit_deaccess_np(np, tpg);
+	tpg = NULL;
+	/* Get another socket */
+	return 1;
+
+new_sess_out:
+	pr_err("iSCSI Login negotiation failed.\n");
+	iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				  ISCSI_LOGIN_STATUS_INIT_ERR);
+	if (!zero_tsih || !conn->sess)
+		goto old_sess_out;
+	if (conn->sess->se_sess)
+		transport_free_session(conn->sess->se_sess);
+	if (conn->sess->session_index != 0) {
+		spin_lock_bh(&sess_idr_lock);
+		idr_remove(&sess_idr, conn->sess->session_index);
+		spin_unlock_bh(&sess_idr_lock);
+	}
+	if (conn->sess->sess_ops)
+		kfree(conn->sess->sess_ops);
+	if (conn->sess)
+		kfree(conn->sess);
+old_sess_out:
+	iscsi_stop_login_thread_timer(np);
+	/*
+	 * If login negotiation fails check if the Time2Retain timer
+	 * needs to be restarted.
+	 */
+	if (!zero_tsih && conn->sess) {
+		spin_lock_bh(&conn->sess->conn_lock);
+		if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+			struct se_portal_group *se_tpg =
+					&ISCSI_TPG_C(conn)->tpg_se_tpg;
+
+			atomic_set(&conn->sess->session_continuation, 0);
+			spin_unlock_bh(&conn->sess->conn_lock);
+			spin_lock_bh(&se_tpg->session_lock);
+			iscsit_start_time2retain_handler(conn->sess);
+			spin_unlock_bh(&se_tpg->session_lock);
+		} else
+			spin_unlock_bh(&conn->sess->conn_lock);
+		iscsit_dec_session_usage_count(conn->sess);
+	}
+
+	if (!IS_ERR(conn->conn_rx_hash.tfm))
+		crypto_free_hash(conn->conn_rx_hash.tfm);
+	if (!IS_ERR(conn->conn_tx_hash.tfm))
+		crypto_free_hash(conn->conn_tx_hash.tfm);
+
+	if (conn->conn_cpumask)
+		free_cpumask_var(conn->conn_cpumask);
+
+	kfree(conn->conn_ops);
+
+	if (conn->param_list) {
+		iscsi_release_param_list(conn->param_list);
+		conn->param_list = NULL;
+	}
+	if (conn->sock)
+		sock_release(conn->sock);
+	kfree(conn);
+
+	if (tpg) {
+		iscsit_deaccess_np(np, tpg);
+		tpg = NULL;
+	}
+
+out:
+	stop = kthread_should_stop();
+	if (!stop && signal_pending(current)) {
+		spin_lock_bh(&np->np_thread_lock);
+		stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
+		spin_unlock_bh(&np->np_thread_lock);
+	}
+	/* Wait for another socket. */
+	if (!stop)
+		return 1;
+
+	iscsi_stop_login_thread_timer(np);
+	spin_lock_bh(&np->np_thread_lock);
+	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+	spin_unlock_bh(&np->np_thread_lock);
+	return 0;
+}
+
+int iscsi_target_login_thread(void *arg)
+{
+	struct iscsi_np *np = arg;
+	int ret;
+
+	allow_signal(SIGINT);
+
+	while (!kthread_should_stop()) {
+		ret = __iscsi_target_login_thread(np);
+		/*
+		 * We break and exit here unless another sock_accept() call
+		 * is expected.
+		 */
+		if (ret != 1)
+			break;
+	}
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 0000000..091dcae
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_LOGIN_H
+#define ISCSI_TARGET_LOGIN_H
+
+extern int iscsi_login_setup_crypto(struct iscsi_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsi_target_setup_login_socket(struct iscsi_np *,
+				struct __kernel_sockaddr_storage *);
+extern int iscsi_target_login_thread(void *);
+extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
+
+#endif   /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 0000000..89f8909
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1066 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/ctype.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_auth.h"
+
+#define MAX_LOGIN_PDUS  7
+#define TEXT_LEN	4096
+
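+/*
+ * Replace the NUL separators between key=value pairs with ';' so the text
+ * buffer can be walked with standard string helpers; strlen_semi() then
+ * returns the length of a value up to the next ';'.
+ */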
+void convert_null_to_semi(char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] == '\0')
+			buf[i] = ';';
+}
+
+int strlen_semi(char *buf)
+{
+	int i = 0;
+
+	while (buf[i] != '\0') {
+		if (buf[i] == ';')
+			return i;
+		i++;
+	}
+
+	return -1;
+}
+
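+/*
+ * Locate "<pattern>=<value>" in in_buf, flag an optional 0x/0X prefix by
+ * setting *type to HEX or DECIMAL, and copy the ';'-terminated value into
+ * out_buf, bounded by max_length.
+ */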
+int extract_param(
+	const char *in_buf,
+	const char *pattern,
+	unsigned int max_length,
+	char *out_buf,
+	unsigned char *type)
+{
+	char *ptr;
+	int len;
+
+	if (!in_buf || !pattern || !out_buf || !type)
+		return -1;
+
+	ptr = strstr(in_buf, pattern);
+	if (!ptr)
+		return -1;
+
+	ptr = strstr(ptr, "=");
+	if (!ptr)
+		return -1;
+
+	ptr += 1;
+	if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
+		ptr += 2; /* skip 0x */
+		*type = HEX;
+	} else
+		*type = DECIMAL;
+
+	len = strlen_semi(ptr);
+	if (len < 0)
+		return -1;
+
+	if (len >= max_length) {
+		pr_err("Length of input: %d exceeds max_length:"
+			" %d\n", len, max_length);
+		return -1;
+	}
+	memcpy(out_buf, ptr, len);
+	out_buf[len] = '\0';
+
+	return 0;
+}
+
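+/*
+ * Dispatch to the negotiated authentication method.  Returns 0 when more
+ * exchanges are required, 1 when authentication completed successfully
+ * (including AuthMethod=None), and 2 on failure or an unsupported method.
+ */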
+static u32 iscsi_handle_authentication(
+	struct iscsi_conn *conn,
+	char *in_buf,
+	char *out_buf,
+	int in_length,
+	int *out_length,
+	unsigned char *authtype)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_auth *auth;
+	struct iscsi_node_acl *iscsi_nacl;
+	struct se_node_acl *se_nacl;
+
+	if (!sess->sess_ops->SessionType) {
+		/*
+		 * For SessionType=Normal
+		 */
+		se_nacl = conn->sess->se_sess->se_node_acl;
+		if (!se_nacl) {
+			pr_err("Unable to locate struct se_node_acl for"
+					" CHAP auth\n");
+			return -1;
+		}
+		iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+				se_node_acl);
+		if (!iscsi_nacl) {
+			pr_err("Unable to locate struct iscsi_node_acl for"
+					" CHAP auth\n");
+			return -1;
+		}
+
+		auth = ISCSI_NODE_AUTH(iscsi_nacl);
+	} else {
+		/*
+		 * For SessionType=Discovery
+		 */
+		auth = &iscsit_global->discovery_acl.node_auth;
+	}
+
+	if (strstr("CHAP", authtype))
+		strcpy(conn->sess->auth_type, "CHAP");
+	else
+		strcpy(conn->sess->auth_type, NONE);
+
+	if (strstr("None", authtype))
+		return 1;
+#ifdef CANSRP
+	else if (strstr("SRP", authtype))
+		return srp_main_loop(conn, auth, in_buf, out_buf,
+				&in_length, out_length);
+#endif
+	else if (strstr("CHAP", authtype))
+		return chap_main_loop(conn, auth, in_buf, out_buf,
+				&in_length, out_length);
+	else if (strstr("SPKM1", authtype))
+		return 2;
+	else if (strstr("SPKM2", authtype))
+		return 2;
+	else if (strstr("KRB5", authtype))
+		return 2;
+	else
+		return 2;
+}
+
+static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+{
+	kfree(conn->auth_protocol);
+}
+
+static int iscsi_target_check_login_request(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int req_csg, req_nsg;
+	u32 payload_length;
+	struct iscsi_login_req *login_req;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length = ntoh24(login_req->dlength);
+
+	switch (login_req->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		break;
+	default:
+		pr_err("Received unknown opcode 0x%02x.\n",
+				login_req->opcode & ISCSI_OPCODE_MASK);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
+	    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+		pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
+			" and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+	req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+
+	if (req_csg != login->current_stage) {
+		pr_err("Initiator unexpectedly changed login stage"
+			" from %d to %d, login failed.\n", login->current_stage,
+			req_csg);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((req_nsg == 2) || (req_csg >= 2) ||
+	   ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
+	    (req_nsg <= req_csg))) {
+		pr_err("Illegal login_req->flags Combination, CSG: %d,"
+			" NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
+			req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if ((login_req->max_version != login->version_max) ||
+	    (login_req->min_version != login->version_min)) {
+		pr_err("Login request changed Version Max/Min"
+			" unexpectedly to 0x%02x/0x%02x, protocol error\n",
+			login_req->max_version, login_req->min_version);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (memcmp(login_req->isid, login->isid, 6) != 0) {
+		pr_err("Login request changed ISID unexpectedly,"
+				" protocol error.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (login_req->itt != login->init_task_tag) {
+		pr_err("Login request changed ITT unexpectedly to"
+			" 0x%08x, protocol error.\n", login_req->itt);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (payload_length > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Login request payload exceeds default"
+			" MaxRecvDataSegmentLength: %u, protocol error.\n",
+				MAX_KEY_VALUE_PAIRS);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_target_check_first_request(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	struct iscsi_param *param = NULL;
+	struct se_node_acl *se_nacl;
+
+	login->first_request = 0;
+
+	list_for_each_entry(param, &conn->param_list->param_list, p_list) {
+		if (!strncmp(param->name, SESSIONTYPE, 11)) {
+			if (!IS_PSTATE_ACCEPTOR(param)) {
+				pr_err("SessionType key not received"
+					" in first login request.\n");
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+				return -1;
+			}
+			if (!strncmp(param->value, DISCOVERY, 9))
+				return 0;
+		}
+
+		if (!strncmp(param->name, INITIATORNAME, 13)) {
+			if (!IS_PSTATE_ACCEPTOR(param)) {
+				if (!login->leading_connection)
+					continue;
+
+				pr_err("InitiatorName key not received"
+					" in first login request.\n");
+				iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+				return -1;
+			}
+
+			/*
+			 * For non-leading connections, double check that the
+			 * received InitiatorName matches the existing session's
+			 * struct iscsi_node_acl.
+			 */
+			if (!login->leading_connection) {
+				se_nacl = conn->sess->se_sess->se_node_acl;
+				if (!se_nacl) {
+					pr_err("Unable to locate"
+						" struct se_node_acl\n");
+					iscsit_tx_login_rsp(conn,
+							ISCSI_STATUS_CLS_INITIATOR_ERR,
+							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+					return -1;
+				}
+
+				if (strcmp(param->value,
+						se_nacl->initiatorname)) {
+					pr_err("Incorrect"
+						" InitiatorName: %s for this"
+						" iSCSI Initiator Node.\n",
+						param->value);
+					iscsit_tx_login_rsp(conn,
+							ISCSI_STATUS_CLS_INITIATOR_ERR,
+							ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+					return -1;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	u32 padding = 0;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+	login_rsp->opcode		= ISCSI_OP_LOGIN_RSP;
+	hton24(login_rsp->dlength, login->rsp_length);
+	memcpy(login_rsp->isid, login->isid, 6);
+	login_rsp->tsih			= cpu_to_be16(login->tsih);
+	login_rsp->itt			= cpu_to_be32(login->init_task_tag);
+	login_rsp->statsn		= cpu_to_be32(conn->stat_sn++);
+	login_rsp->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	login_rsp->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
+		" ExpCmdSN: 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
+		" %u\n", login_rsp->flags, ntohl(login_rsp->itt),
+		ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
+		ntohl(login_rsp->statsn), login->rsp_length);
+
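+	/*
+	 * iSCSI data segments are padded to a 4-byte boundary; account for
+	 * the pad bytes when transmitting the key=value payload.
+	 */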
+	padding = ((-login->rsp_length) & 3);
+
+	if (iscsi_login_tx_data(
+			conn,
+			login->rsp,
+			login->rsp_buf,
+			login->rsp_length + padding) < 0)
+		return -1;
+
+	login->rsp_length		= 0;
+	login_rsp->tsih			= be16_to_cpu(login_rsp->tsih);
+	login_rsp->itt			= be32_to_cpu(login_rsp->itt);
+	login_rsp->statsn		= be32_to_cpu(login_rsp->statsn);
+	mutex_lock(&sess->cmdsn_mutex);
+	login_rsp->exp_cmdsn		= be32_to_cpu(sess->exp_cmd_sn);
+	login_rsp->max_cmdsn		= be32_to_cpu(sess->max_cmd_sn);
+	mutex_unlock(&sess->cmdsn_mutex);
+
+	return 0;
+}
+
+static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	u32 padding = 0, payload_length;
+	struct iscsi_login_req *login_req;
+
+	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+		return -1;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length			= ntoh24(login_req->dlength);
+	login_req->tsih			= be16_to_cpu(login_req->tsih);
+	login_req->itt			= be32_to_cpu(login_req->itt);
+	login_req->cid			= be16_to_cpu(login_req->cid);
+	login_req->cmdsn		= be32_to_cpu(login_req->cmdsn);
+	login_req->exp_statsn		= be32_to_cpu(login_req->exp_statsn);
+
+	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+		 login_req->flags, login_req->itt, login_req->cmdsn,
+		 login_req->exp_statsn, login_req->cid, payload_length);
+
+	if (iscsi_target_check_login_request(conn, login) < 0)
+		return -1;
+
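+	/*
+	 * Receive the key=value payload, including the pad bytes that round
+	 * the data segment up to a 4-byte boundary.
+	 */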
+	padding = ((-payload_length) & 3);
+	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+
+	if (iscsi_login_rx_data(
+			conn,
+			login->req_buf,
+			payload_length + padding) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	if (iscsi_target_do_tx_login_io(conn, login) < 0)
+		return -1;
+
+	if (iscsi_target_do_rx_login_io(conn, login) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int iscsi_target_get_initial_payload(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	u32 padding = 0, payload_length;
+	struct iscsi_login_req *login_req;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length = ntoh24(login_req->dlength);
+
+	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+		login_req->flags, login_req->itt, login_req->cmdsn,
+		login_req->exp_statsn, payload_length);
+
+	if (iscsi_target_check_login_request(conn, login) < 0)
+		return -1;
+
+	padding = ((-payload_length) & 3);
+
+	if (iscsi_login_rx_data(
+			conn,
+			login->req_buf,
+			payload_length + padding) < 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ *	NOTE: We check for existing sessions or connections AFTER the initiator
+ *	has been successfully authenticated in order to protect against faked
+ *	ISID/TSIH combinations.
+ */
+static int iscsi_target_check_for_existing_instances(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	if (login->checked_for_existing)
+		return 0;
+
+	login->checked_for_existing = 1;
+
+	if (!login->tsih)
+		return iscsi_check_for_session_reinstatement(conn);
+	else
+		return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
+				login->initial_exp_statsn);
+}
+
+static int iscsi_target_do_authentication(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int authret;
+	u32 payload_length;
+	struct iscsi_param *param;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+	if (!param)
+		return -1;
+
+	authret = iscsi_handle_authentication(
+			conn,
+			login->req_buf,
+			login->rsp_buf,
+			payload_length,
+			&login->rsp_length,
+			param->value);
+	switch (authret) {
+	case 0:
+		pr_debug("Received OK response"
+		" from LIO Authentication, continuing.\n");
+		break;
+	case 1:
+		pr_debug("iSCSI security negotiation"
+			" completed successfully.\n");
+		login->auth_complete = 1;
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+			login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+					     ISCSI_FLAG_LOGIN_TRANSIT);
+			login->current_stage = 1;
+		}
+		return iscsi_target_check_for_existing_instances(
+				conn, login);
+	case 2:
+		pr_err("Security negotiation"
+			" failed.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
+	default:
+		pr_err("Received unknown error %d from LIO"
+				" Authentication\n", authret);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_TARGET_ERROR);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_target_handle_csg_zero(
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	int ret;
+	u32 payload_length;
+	struct iscsi_param *param;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+	if (!param)
+		return -1;
+
+	ret = iscsi_decode_text_input(
+			PHASE_SECURITY|PHASE_DECLARATIVE,
+			SENDER_INITIATOR|SENDER_RECEIVER,
+			login->req_buf,
+			payload_length,
+			conn->param_list);
+	if (ret < 0)
+		return -1;
+
+	if (ret > 0) {
+		if (login->auth_complete) {
+			pr_err("Initiator has already been"
+				" successfully authenticated, but is still"
+				" sending %s keys.\n", param->value);
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_INIT_ERR);
+			return -1;
+		}
+
+		goto do_auth;
+	}
+
+	if (login->first_request)
+		if (iscsi_target_check_first_request(conn, login) < 0)
+			return -1;
+
+	ret = iscsi_encode_text_output(
+			PHASE_SECURITY|PHASE_DECLARATIVE,
+			SENDER_TARGET,
+			login->rsp_buf,
+			&login->rsp_length,
+			conn->param_list);
+	if (ret < 0)
+		return -1;
+
+	if (!iscsi_check_negotiated_keys(conn->param_list)) {
+		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+		    !strncmp(param->value, NONE, 4)) {
+			pr_err("Initiator sent AuthMethod=None but"
+				" Target is enforcing iSCSI Authentication,"
+					" login failed.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+					ISCSI_LOGIN_STATUS_AUTH_FAILED);
+			return -1;
+		}
+
+		if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+		    !login->auth_complete)
+			return 0;
+
+		if (strncmp(param->value, NONE, 4) && !login->auth_complete)
+			return 0;
+
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+					    ISCSI_FLAG_LOGIN_TRANSIT;
+			login->current_stage = 1;
+		}
+	}
+
+	return 0;
+do_auth:
+	return iscsi_target_do_authentication(conn, login);
+}
+
+static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	int ret;
+	u32 payload_length;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+	payload_length = ntoh24(login_req->dlength);
+
+	ret = iscsi_decode_text_input(
+			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+			SENDER_INITIATOR|SENDER_RECEIVER,
+			login->req_buf,
+			payload_length,
+			conn->param_list);
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (login->first_request)
+		if (iscsi_target_check_first_request(conn, login) < 0)
+			return -1;
+
+	if (iscsi_target_check_for_existing_instances(conn, login) < 0)
+		return -1;
+
+	ret = iscsi_encode_text_output(
+			PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+			SENDER_TARGET,
+			login->rsp_buf,
+			&login->rsp_length,
+			conn->param_list);
+	if (ret < 0) {
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_INIT_ERR);
+		return -1;
+	}
+
+	if (!login->auth_complete &&
+	     ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+		pr_err("Initiator is requesting CSG: 1, has not been"
+			 " successfully authenticated, and the Target is"
+			" enforcing iSCSI Authentication, login failed.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_AUTH_FAILED);
+		return -1;
+	}
+
+	if (!iscsi_check_negotiated_keys(conn->param_list))
+		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
+		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
+					    ISCSI_FLAG_LOGIN_TRANSIT;
+
+	return 0;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	int pdu_count = 0;
+	struct iscsi_login_req *login_req;
+	struct iscsi_login_rsp *login_rsp;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
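+	/*
+	 * Exchange Login Request/Response PDUs, bounded by MAX_LOGIN_PDUS,
+	 * handling the Security (CSG=0) and Operational (CSG=1) stages until
+	 * the initiator transits to Full Feature Phase.
+	 */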
+	while (1) {
+		if (++pdu_count > MAX_LOGIN_PDUS) {
+			pr_err("MAX_LOGIN_PDUS count reached.\n");
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+					ISCSI_LOGIN_STATUS_TARGET_ERROR);
+			return -1;
+		}
+
+		switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+		case 0:
+			login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+			if (iscsi_target_handle_csg_zero(conn, login) < 0)
+				return -1;
+			break;
+		case 1:
+			login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
+			if (iscsi_target_handle_csg_one(conn, login) < 0)
+				return -1;
+			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+				login->tsih = conn->sess->tsih;
+				if (iscsi_target_do_tx_login_io(conn,
+						login) < 0)
+					return -1;
+				return 0;
+			}
+			break;
+		default:
+			pr_err("Illegal CSG: %d received from"
+				" Initiator, protocol error.\n",
+				(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+				>> 2);
+			break;
+		}
+
+		if (iscsi_target_do_login_io(conn, login) < 0)
+			return -1;
+
+		if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
+		}
+	}
+
+	return 0;
+}
+
+static void iscsi_initiatorname_tolower(
+	char *param_buf)
+{
+	char *c;
+	u32 iqn_size = strlen(param_buf), i;
+
+	for (i = 0; i < iqn_size; i++) {
+		c = &param_buf[i];
+		if (!isupper(*c))
+			continue;
+
+		*c = tolower(*c);
+	}
+}
+
+/*
+ * Processes the first Login Request.
+ */
+static int iscsi_target_locate_portal(
+	struct iscsi_np *np,
+	struct iscsi_conn *conn,
+	struct iscsi_login *login)
+{
+	char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
+	char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_login_req *login_req;
+	u32 payload_length;
+	int sessiontype = 0, ret = 0;
+
+	login_req = (struct iscsi_login_req *) login->req;
+	payload_length = ntoh24(login_req->dlength);
+
+	login->first_request	= 1;
+	login->leading_connection = (!login_req->tsih) ? 1 : 0;
+	login->current_stage	=
+		(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+	login->version_min	= login_req->min_version;
+	login->version_max	= login_req->max_version;
+	memcpy(login->isid, login_req->isid, 6);
+	login->cmd_sn		= login_req->cmdsn;
+	login->init_task_tag	= login_req->itt;
+	login->initial_exp_statsn = login_req->exp_statsn;
+	login->cid		= login_req->cid;
+	login->tsih		= login_req->tsih;
+
+	if (iscsi_target_get_initial_payload(conn, login) < 0)
+		return -1;
+
+	tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+	if (!tmpbuf) {
+		pr_err("Unable to allocate memory for tmpbuf.\n");
+		return -1;
+	}
+
+	memcpy(tmpbuf, login->req_buf, payload_length);
+	tmpbuf[payload_length] = '\0';
+	start = tmpbuf;
+	end = (start + payload_length);
+
+	/*
+	 * Locate the initial keys expected from the Initiator node in
+	 * the first login request in order to progress with the login phase.
+	 */
+	while (start < end) {
+		if (iscsi_extract_key_value(start, &key, &value) < 0) {
+			ret = -1;
+			goto out;
+		}
+
+		if (!strncmp(key, "InitiatorName", 13))
+			i_buf = value;
+		else if (!strncmp(key, "SessionType", 11))
+			s_buf = value;
+		else if (!strncmp(key, "TargetName", 10))
+			t_buf = value;
+
+		start += strlen(key) + strlen(value) + 2;
+	}
+
+	/*
+	 * See 5.3.  Login Phase.
+	 */
+	if (!i_buf) {
+		pr_err("InitiatorName key not received"
+			" in first login request.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+	/*
+	 * Convert the incoming InitiatorName to lowercase following
+	 * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
+	 * are NOT case sensitive.
+	 */
+	iscsi_initiatorname_tolower(i_buf);
+
+	if (!s_buf) {
+		if (!login->leading_connection)
+			goto get_target;
+
+		pr_err("SessionType key not received"
+			" in first login request.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Use default portal group for discovery sessions.
+	 */
+	sessiontype = strncmp(s_buf, DISCOVERY, 9);
+	if (!sessiontype) {
+		conn->tpg = iscsit_global->discovery_tpg;
+		if (!login->leading_connection)
+			goto get_target;
+
+		sess->sess_ops->SessionType = 1;
+		/*
+		 * Setup crc32c modules from libcrypto
+		 */
+		if (iscsi_login_setup_crypto(conn) < 0) {
+			pr_err("iscsi_login_setup_crypto() failed\n");
+			ret = -1;
+			goto out;
+		}
+		/*
+		 * Serialize access across the discovery struct iscsi_portal_group to
+		 * process login attempt.
+		 */
+		if (iscsit_access_np(np, conn->tpg) < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+			ret = -1;
+			goto out;
+		}
+		ret = 0;
+		goto out;
+	}
+
+get_target:
+	if (!t_buf) {
+		pr_err("TargetName key not received"
+			" in first login request while"
+			" SessionType=Normal.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+			ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+		ret = -1;
+		goto out;
+	}
+
+	/*
+	 * Locate Target IQN from Storage Node.
+	 */
+	tiqn = iscsit_get_tiqn_for_login(t_buf);
+	if (!tiqn) {
+		pr_err("Unable to locate Target IQN: %s in"
+			" Storage Node\n", t_buf);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		ret = -1;
+		goto out;
+	}
+	pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
+
+	/*
+	 * Locate Target Portal Group from Storage Node.
+	 */
+	conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+	if (!conn->tpg) {
+		pr_err("Unable to locate Target Portal Group"
+				" on %s\n", tiqn->tiqn);
+		iscsit_put_tiqn_for_login(tiqn);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		ret = -1;
+		goto out;
+	}
+	pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
+	/*
+	 * Setup crc32c modules from libcrypto
+	 */
+	if (iscsi_login_setup_crypto(conn) < 0) {
+		pr_err("iscsi_login_setup_crypto() failed\n");
+		ret = -1;
+		goto out;
+	}
+	/*
+	 * Serialize access across the struct iscsi_portal_group to
+	 * process login attempt.
+	 */
+	if (iscsit_access_np(np, conn->tpg) < 0) {
+		iscsit_put_tiqn_for_login(tiqn);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+		ret = -1;
+		conn->tpg = NULL;
+		goto out;
+	}
+
+	/*
+	 * conn->sess->node_acl will be set when the referenced
+	 * struct iscsi_session is located from received ISID+TSIH in
+	 * iscsi_login_non_zero_tsih_s2().
+	 */
+	if (!login->leading_connection) {
+		ret = 0;
+		goto out;
+	}
+
+	/*
+	 * This value is required in iscsi_login_zero_tsih_s2()
+	 */
+	sess->sess_ops->SessionType = 0;
+
+	/*
+	 * Locate incoming Initiator IQN reference from Storage Node.
+	 */
+	sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+			&conn->tpg->tpg_se_tpg, i_buf);
+	if (!sess->se_sess->se_node_acl) {
+		pr_err("iSCSI Initiator Node: %s is not authorized to"
+			" access iSCSI target portal group: %hu.\n",
+				i_buf, conn->tpg->tpgt);
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+				ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
+		ret = -1;
+		goto out;
+	}
+
+	ret = 0;
+out:
+	kfree(tmpbuf);
+	return ret;
+}
+
+struct iscsi_login *iscsi_target_init_negotiation(
+	struct iscsi_np *np,
+	struct iscsi_conn *conn,
+	char *login_pdu)
+{
+	struct iscsi_login *login;
+
+	login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+	if (!login) {
+		pr_err("Unable to allocate memory for struct iscsi_login.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		return NULL;
+	}
+
+	login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!login->req) {
+		pr_err("Unable to allocate memory for Login Request.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		goto out;
+	}
+
+	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->req_buf) {
+		pr_err("Unable to allocate memory for request buffer.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		goto out;
+	}
+	/*
+	 * SessionType: Discovery
+	 *
+	 *	Locates Default Portal
+	 *
+	 * SessionType: Normal
+	 *
+	 *	Locates Target Portal from NP -> Target IQN
+	 */
+	if (iscsi_target_locate_portal(np, conn, login) < 0) {
+		pr_err("iSCSI Login negotiation failed.\n");
+		goto out;
+	}
+
+	return login;
+out:
+	kfree(login->req);
+	kfree(login->req_buf);
+	kfree(login);
+
+	return NULL;
+}
+
+int iscsi_target_start_negotiation(
+	struct iscsi_login *login,
+	struct iscsi_conn *conn)
+{
+	int ret = -1;
+
+	login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+	if (!login->rsp) {
+		pr_err("Unable to allocate memory for"
+				" Login Response.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		ret = -1;
+		goto out;
+	}
+
+	login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->rsp_buf) {
+		pr_err("Unable to allocate memory for"
+			" response buffer.\n");
+		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+		ret = -1;
+		goto out;
+	}
+
+	ret = iscsi_target_do_login(conn, login);
+out:
+	if (ret != 0)
+		iscsi_remove_failed_auth_entry(conn);
+
+	iscsi_target_nego_release(login, conn);
+	return ret;
+}
+
+void iscsi_target_nego_release(
+	struct iscsi_login *login,
+	struct iscsi_conn *conn)
+{
+	kfree(login->req);
+	kfree(login->rsp);
+	kfree(login->req_buf);
+	kfree(login->rsp_buf);
+	kfree(login);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 0000000..92e133a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
+#ifndef ISCSI_TARGET_NEGO_H
+#define ISCSI_TARGET_NEGO_H
+
+#define DECIMAL         0
+#define HEX             1
+
+extern void convert_null_to_semi(char *, int);
+extern int extract_param(const char *, const char *, unsigned int, char *,
+		unsigned char *);
+extern struct iscsi_login *iscsi_target_init_negotiation(
+		struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_start_negotiation(
+		struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(
+		struct iscsi_login *, struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 0000000..11dc293
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,262 @@
+/*******************************************************************************
+ * This file contains the main functions related to Initiator Node Attributes.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_nodeattrib.h"
+
+static inline char *iscsit_na_get_initiatorname(
+	struct iscsi_node_acl *nacl)
+{
+	struct se_node_acl *se_nacl = &nacl->se_node_acl;
+
+	return &se_nacl->initiatorname[0];
+}
+
+void iscsit_set_default_node_attribues(
+	struct iscsi_node_acl *acl)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	a->dataout_timeout = NA_DATAOUT_TIMEOUT;
+	a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
+	a->nopin_timeout = NA_NOPIN_TIMEOUT;
+	a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
+	a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
+	a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
+	a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
+	a->default_erl = NA_DEFAULT_ERL;
+}
+
+int iscsit_na_dataout_timeout(
+	struct iscsi_node_acl *acl,
+	u32 dataout_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
+		pr_err("Requested DataOut Timeout %u larger than"
+			" maximum %u\n", dataout_timeout,
+			NA_DATAOUT_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
+		pr_err("Requested DataOut Timeout %u smaller than"
+			" minimum %u\n", dataout_timeout,
+			NA_DATAOUT_TIMEOUT_MIX);
+		return -EINVAL;
+	}
+
+	a->dataout_timeout = dataout_timeout;
+	pr_debug("Set DataOut Timeout to %u for Initiator Node"
+		" %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_dataout_timeout_retries(
+	struct iscsi_node_acl *acl,
+	u32 dataout_timeout_retries)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
+		pr_err("Requested DataOut Timeout Retries %u larger"
+			" than maximum %u", dataout_timeout_retries,
+				NA_DATAOUT_TIMEOUT_RETRIES_MAX);
+		return -EINVAL;
+	} else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
+		pr_err("Requested DataOut Timeout Retries %u smaller"
+			" than minimum %u", dataout_timeout_retries,
+				NA_DATAOUT_TIMEOUT_RETRIES_MIN);
+		return -EINVAL;
+	}
+
+	a->dataout_timeout_retries = dataout_timeout_retries;
+	pr_debug("Set DataOut Timeout Retries to %u for"
+		" Initiator Node %s\n", a->dataout_timeout_retries,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_nopin_timeout(
+	struct iscsi_node_acl *acl,
+	u32 nopin_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
+	struct se_session *se_sess;
+	u32 orig_nopin_timeout = a->nopin_timeout;
+
+	if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
+		pr_err("Requested NopIn Timeout %u larger than maximum"
+			" %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
+		   (nopin_timeout != 0)) {
+		pr_err("Requested NopIn Timeout %u smaller than"
+			" minimum %u and not 0\n", nopin_timeout,
+			NA_NOPIN_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->nopin_timeout = nopin_timeout;
+	pr_debug("Set NopIn Timeout to %u for Initiator"
+		" Node %s\n", a->nopin_timeout,
+		iscsit_na_get_initiatorname(acl));
+	/*
+	 * Reenable disabled nopin_timeout timer for all iSCSI connections.
+	 */
+	if (!orig_nopin_timeout) {
+		spin_lock_bh(&se_nacl->nacl_sess_lock);
+		se_sess = se_nacl->nacl_sess;
+		if (se_sess) {
+			sess = se_sess->fabric_sess_ptr;
+
+			spin_lock(&sess->conn_lock);
+			list_for_each_entry(conn, &sess->sess_conn_list,
+					conn_list) {
+				if (conn->conn_state !=
+						TARG_CONN_STATE_LOGGED_IN)
+					continue;
+
+				spin_lock(&conn->nopin_timer_lock);
+				__iscsit_start_nopin_timer(conn);
+				spin_unlock(&conn->nopin_timer_lock);
+			}
+			spin_unlock(&sess->conn_lock);
+		}
+		spin_unlock_bh(&se_nacl->nacl_sess_lock);
+	}
+
+	return 0;
+}
+
+int iscsit_na_nopin_response_timeout(
+	struct iscsi_node_acl *acl,
+	u32 nopin_response_timeout)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
+		pr_err("Requested NopIn Response Timeout %u larger"
+			" than maximum %u\n", nopin_response_timeout,
+				NA_NOPIN_RESPONSE_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
+		pr_err("Requested NopIn Response Timeout %u smaller"
+			" than minimum %u\n", nopin_response_timeout,
+				NA_NOPIN_RESPONSE_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->nopin_response_timeout = nopin_response_timeout;
+	pr_debug("Set NopIn Response Timeout to %u for"
+		" Initiator Node %s\n", a->nopin_response_timeout,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_datain_pdu_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_datain_pdu_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
+		pr_err("Requested Random DataIN PDU Offsets: %u not"
+			" 0 or 1\n", random_datain_pdu_offsets);
+		return -EINVAL;
+	}
+
+	a->random_datain_pdu_offsets = random_datain_pdu_offsets;
+	pr_debug("Set Random DataIN PDU Offsets to %u for"
+		" Initiator Node %s\n", a->random_datain_pdu_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_datain_seq_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_datain_seq_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
+		pr_err("Requested Random DataIN Sequence Offsets: %u"
+			" not 0 or 1\n", random_datain_seq_offsets);
+		return -EINVAL;
+	}
+
+	a->random_datain_seq_offsets = random_datain_seq_offsets;
+	pr_debug("Set Random DataIN Sequence Offsets to %u for"
+		" Initiator Node %s\n", a->random_datain_seq_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_random_r2t_offsets(
+	struct iscsi_node_acl *acl,
+	u32 random_r2t_offsets)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
+		pr_err("Requested Random R2T Offsets: %u not"
+			" 0 or 1\n", random_r2t_offsets);
+		return -EINVAL;
+	}
+
+	a->random_r2t_offsets = random_r2t_offsets;
+	pr_debug("Set Random R2T Offsets to %u for"
+		" Initiator Node %s\n", a->random_r2t_offsets,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
+
+int iscsit_na_default_erl(
+	struct iscsi_node_acl *acl,
+	u32 default_erl)
+{
+	struct iscsi_node_attrib *a = &acl->node_attrib;
+
+	if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
+		pr_err("Requested default ERL: %u not 0, 1, or 2\n",
+				default_erl);
+		return -EINVAL;
+	}
+
+	a->default_erl = default_erl;
+	pr_debug("Set use ERL0 flag to %u for Initiator"
+		" Node %s\n", a->default_erl,
+		iscsit_na_get_initiatorname(acl));
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 0000000..c970b32
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_NODEATTRIB_H
+#define ISCSI_TARGET_NODEATTRIB_H
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
+
+#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 0000000..ed4abad
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1891 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_parameters.h"
+
+int iscsi_login_rx_data(
+	struct iscsi_conn *conn,
+	char *buf,
+	int length)
+{
+	int rx_got;
+	struct kvec iov;
+
+	memset(&iov, 0, sizeof(struct kvec));
+	iov.iov_len	= length;
+	iov.iov_base	= buf;
+
+	/*
+	 * Initial Marker-less Interval.
+	 * Add the values regardless of IFMarker/OFMarker, considering
+	 * it may not be negotiated yet.
+	 */
+	conn->of_marker += length;
+
+	rx_got = rx_data(conn, &iov, 1, length);
+	if (rx_got != length) {
+		pr_err("rx_data returned %d, expecting %d.\n",
+				rx_got, length);
+		return -1;
+	}
+
+	return 0;
+}
+
+int iscsi_login_tx_data(
+	struct iscsi_conn *conn,
+	char *pdu_buf,
+	char *text_buf,
+	int text_length)
+{
+	int length, tx_sent;
+	struct kvec iov[2];
+
+	length = (ISCSI_HDR_LEN + text_length);
+
+	memset(&iov[0], 0, 2 * sizeof(struct kvec));
+	iov[0].iov_len		= ISCSI_HDR_LEN;
+	iov[0].iov_base		= pdu_buf;
+	iov[1].iov_len		= text_length;
+	iov[1].iov_base		= text_buf;
+
+	/*
+	 * Initial Marker-less Interval.
+	 * Add the values regardless of IFMarker/OFMarker, considering
+	 * it may not be negotiated yet.
+	 */
+	conn->if_marker += length;
+
+	tx_sent = tx_data(conn, &iov[0], 2, length);
+	if (tx_sent != length) {
+		pr_err("tx_data returned %d, expecting %d.\n",
+				tx_sent, length);
+		return -1;
+	}
+
+	return 0;
+}
+
+void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
+{
+	pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
+				"CRC32C" : "None");
+	pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
+				"CRC32C" : "None");
+	pr_debug("MaxRecvDataSegmentLength: %u\n",
+				conn_ops->MaxRecvDataSegmentLength);
+	pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
+	pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
+	if (conn_ops->OFMarker)
+		pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
+	if (conn_ops->IFMarker)
+		pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
+}
+
+void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
+{
+	pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
+	pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
+	pr_debug("TargetName: %s\n", sess_ops->TargetName);
+	pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
+	pr_debug("TargetPortalGroupTag: %hu\n",
+			sess_ops->TargetPortalGroupTag);
+	pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
+	pr_debug("InitialR2T: %s\n",
+			(sess_ops->InitialR2T) ? "Yes" : "No");
+	pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
+			"Yes" : "No");
+	pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
+	pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
+	pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
+	pr_debug("DefaultTime2Retain: %hu\n",
+			sess_ops->DefaultTime2Retain);
+	pr_debug("MaxOutstandingR2T: %hu\n",
+			sess_ops->MaxOutstandingR2T);
+	pr_debug("DataPDUInOrder: %s\n",
+			(sess_ops->DataPDUInOrder) ? "Yes" : "No");
+	pr_debug("DataSequenceInOrder: %s\n",
+			(sess_ops->DataSequenceInOrder) ? "Yes" : "No");
+	pr_debug("ErrorRecoveryLevel: %hu\n",
+			sess_ops->ErrorRecoveryLevel);
+	pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
+			"Discovery" : "Normal");
+}
+
+void iscsi_print_params(struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list)
+		pr_debug("%s: %s\n", param->name, param->value);
+}
+
+static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
+		char *name, char *value, u8 phase, u8 scope, u8 sender,
+		u16 type_range, u8 use)
+{
+	struct iscsi_param *param = NULL;
+
+	param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+	if (!param) {
+		pr_err("Unable to allocate memory for parameter.\n");
+		goto out;
+	}
+	INIT_LIST_HEAD(&param->p_list);
+
+	param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!param->name) {
+		pr_err("Unable to allocate memory for parameter name.\n");
+		goto out;
+	}
+
+	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+	if (!param->value) {
+		pr_err("Unable to allocate memory for parameter value.\n");
+		goto out;
+	}
+
+	memcpy(param->name, name, strlen(name));
+	param->name[strlen(name)] = '\0';
+	memcpy(param->value, value, strlen(value));
+	param->value[strlen(value)] = '\0';
+	param->phase		= phase;
+	param->scope		= scope;
+	param->sender		= sender;
+	param->use		= use;
+	param->type_range	= type_range;
+
+	switch (param->type_range) {
+	case TYPERANGE_BOOL_AND:
+		param->type = TYPE_BOOL_AND;
+		break;
+	case TYPERANGE_BOOL_OR:
+		param->type = TYPE_BOOL_OR;
+		break;
+	case TYPERANGE_0_TO_2:
+	case TYPERANGE_0_TO_3600:
+	case TYPERANGE_0_TO_32767:
+	case TYPERANGE_0_TO_65535:
+	case TYPERANGE_1_TO_65535:
+	case TYPERANGE_2_TO_3600:
+	case TYPERANGE_512_TO_16777215:
+		param->type = TYPE_NUMBER;
+		break;
+	case TYPERANGE_AUTH:
+	case TYPERANGE_DIGEST:
+		param->type = TYPE_VALUE_LIST | TYPE_STRING;
+		break;
+	case TYPERANGE_MARKINT:
+		param->type = TYPE_NUMBER_RANGE;
+		param->type_range |= TYPERANGE_1_TO_65535;
+		break;
+	case TYPERANGE_ISCSINAME:
+	case TYPERANGE_SESSIONTYPE:
+	case TYPERANGE_TARGETADDRESS:
+	case TYPERANGE_UTF8:
+		param->type = TYPE_STRING;
+		break;
+	default:
+		pr_err("Unknown type_range 0x%02x\n",
+				param->type_range);
+		goto out;
+	}
+	list_add_tail(&param->p_list, &param_list->param_list);
+
+	return param;
+out:
+	if (param) {
+		kfree(param->value);
+		kfree(param->name);
+		kfree(param);
+	}
+
+	return NULL;
+}
+
+/* #warning Add extension keys */
+int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
+{
+	struct iscsi_param *param = NULL;
+	struct iscsi_param_list *pl;
+
+	pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+	if (!pl) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_param_list.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&pl->param_list);
+	INIT_LIST_HEAD(&pl->extra_response_list);
+
+	/*
+	 * The format for setting the initial parameter definitions is:
+	 *
+	 * Parameter name:
+	 * Initial value:
+	 * Allowable phase:
+	 * Scope:
+	 * Allowable senders:
+	 * Typerange:
+	 * Use:
+	 */
+	param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+			PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_AUTH, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+			INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+			PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_UTF8, 0);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_ISCSINAME, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORNAME,
+			INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_UTF8, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORALIAS,
+			INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+			USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETADDRESS,
+			INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+			SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_TARGETADDRESS, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+			INITIAL_TARGETPORTALGROUPTAG,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+			TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+			INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+			USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+			INITIAL_MAXRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+			INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
+			INITIAL_FIRSTBURSTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
+			INITIAL_DEFAULTTIME2WAIT,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
+			INITIAL_DEFAULTTIME2RETAIN,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
+			INITIAL_MAXOUTSTANDINGR2T,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATAPDUINORDER,
+			INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
+			SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
+			USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
+			INITIAL_DATASEQUENCEINORDER,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
+			INITIAL_ERRORRECOVERYLEVEL,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_0_TO_2, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
+			PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+			TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+	if (!param)
+		goto out;
+
+	*param_list_ptr = pl;
+	return 0;
+out:
+	iscsi_release_param_list(pl);
+	return -1;
+}
+
+int iscsi_set_keys_to_negotiate(
+	int sessiontype,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		param->state = 0;
+		if (!strcmp(param->name, AUTHMETHOD)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, HEADERDIGEST)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATADIGEST)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, TARGETNAME)) {
+			continue;
+		} else if (!strcmp(param->name, INITIATORNAME)) {
+			continue;
+		} else if (!strcmp(param->name, TARGETALIAS)) {
+			if (param->value)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIATORALIAS)) {
+			continue;
+		} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIALR2T)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATAPDUINORDER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, SESSIONTYPE)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, IFMARKER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, OFMARKER)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, IFMARKINT)) {
+			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, OFMARKINT)) {
+			SET_PSTATE_NEGOTIATE(param);
+		}
+	}
+
+	return 0;
+}
+
+int iscsi_set_keys_irrelevant_for_discovery(
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!strcmp(param->name, MAXCONNECTIONS))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, INITIALR2T))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IMMEDIATEDATA))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, MAXBURSTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, FIRSTBURSTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DATAPDUINORDER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DATASEQUENCEINORDER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DEFAULTTIME2WAIT))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IFMARKER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, OFMARKER))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, IFMARKINT))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, OFMARKINT))
+			param->state &= ~PSTATE_NEGOTIATE;
+	}
+
+	return 0;
+}
+
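+/*
+ * Duplicate the TPG parameter list for a new connection.  For non-leading
+ * connections only connection-scoped keys (plus TargetName, InitiatorName
+ * and TargetPortalGroupTag) are copied, since session-wide keys were
+ * already settled on the leading connection.
+ */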
+int iscsi_copy_param_list(
+	struct iscsi_param_list **dst_param_list,
+	struct iscsi_param_list *src_param_list,
+	int leading)
+{
+	struct iscsi_param *param = NULL;
+	struct iscsi_param *new_param = NULL;
+	struct iscsi_param_list *param_list = NULL;
+
+	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+	if (!param_list) {
+		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&param_list->param_list);
+	INIT_LIST_HEAD(&param_list->extra_response_list);
+
+	list_for_each_entry(param, &src_param_list->param_list, p_list) {
+		if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
+			if ((strcmp(param->name, "TargetName") != 0) &&
+			    (strcmp(param->name, "InitiatorName") != 0) &&
+			    (strcmp(param->name, "TargetPortalGroupTag") != 0))
+				continue;
+		}
+
+		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+		if (!new_param) {
+			pr_err("Unable to allocate memory for struct iscsi_param.\n");
+			goto err_out;
+		}
+
+		new_param->name = kstrdup(param->name, GFP_KERNEL);
+		new_param->value = kstrdup(param->value, GFP_KERNEL);
+		if (!new_param->value || !new_param->name) {
+			kfree(new_param->value);
+			kfree(new_param->name);
+			kfree(new_param);
+			pr_err("Unable to allocate memory for parameter name/value.\n");
+			goto err_out;
+		}
+
+		new_param->set_param = param->set_param;
+		new_param->phase = param->phase;
+		new_param->scope = param->scope;
+		new_param->sender = param->sender;
+		new_param->type = param->type;
+		new_param->use = param->use;
+		new_param->type_range = param->type_range;
+
+		list_add_tail(&new_param->p_list, &param_list->param_list);
+	}
+
+	if (!list_empty(&param_list->param_list)) {
+		*dst_param_list = param_list;
+	} else {
+		pr_err("No parameters allocated.\n");
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	iscsi_release_param_list(param_list);
+	return -1;
+}
+
+static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
+{
+	struct iscsi_extra_response *er, *er_tmp;
+
+	list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
+			er_list) {
+		list_del(&er->er_list);
+		kfree(er);
+	}
+}
+
+void iscsi_release_param_list(struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param, *param_tmp;
+
+	list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
+			p_list) {
+		list_del(&param->p_list);
+
+		kfree(param->name);
+		param->name = NULL;
+		kfree(param->value);
+		param->value = NULL;
+		kfree(param);
+		param = NULL;
+	}
+
+	iscsi_release_extra_responses(param_list);
+
+	kfree(param_list);
+}
+
+struct iscsi_param *iscsi_find_param_from_key(
+	char *key,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	if (!key || !param_list) {
+		pr_err("Key or parameter list pointer is NULL.\n");
+		return NULL;
+	}
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!strcmp(key, param->name))
+			return param;
+	}
+
+	pr_err("Unable to locate key \"%s\".\n", key);
+	return NULL;
+}
+
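+/*
+ * Split a single "Key=Value" string in place: the '=' is replaced with a
+ * NUL so *key points at the key and *value at the text following it.
+ * For example, "HeaderDigest=CRC32C,None" yields key "HeaderDigest" and
+ * value "CRC32C,None".
+ */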
+int iscsi_extract_key_value(char *textbuf, char **key, char **value)
+{
+	*value = strchr(textbuf, '=');
+	if (!*value) {
+		pr_err("Unable to locate \"=\" separator for key,"
+				" ignoring request.\n");
+		return -1;
+	}
+
+	*key = textbuf;
+	**value = '\0';
+	*value = *value + 1;
+
+	return 0;
+}
+
+int iscsi_update_param_value(struct iscsi_param *param, char *value)
+{
+	kfree(param->value);
+
+	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+	if (!param->value) {
+		pr_err("Unable to allocate memory for value.\n");
+		return -1;
+	}
+
+	memcpy(param->value, value, strlen(value));
+	param->value[strlen(value)] = '\0';
+
+	pr_debug("iSCSI Parameter updated to %s=%s\n",
+			param->name, param->value);
+	return 0;
+}
+
+static int iscsi_add_notunderstood_response(
+	char *key,
+	char *value,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_extra_response *extra_response;
+
+	if (strlen(value) > VALUE_MAXLEN) {
+		pr_err("Value for notunderstood key \"%s\" exceeds %d,"
+			" protocol error.\n", key, VALUE_MAXLEN);
+		return -1;
+	}
+
+	extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
+	if (!extra_response) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_extra_response.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&extra_response->er_list);
+
+	strlcpy(extra_response->key, key, sizeof(extra_response->key));
+	strlcpy(extra_response->value, NOTUNDERSTOOD,
+		sizeof(extra_response->value));
+
+	list_add_tail(&extra_response->er_list,
+			&param_list->extra_response_list);
+	return 0;
+}
+
+static int iscsi_check_for_auth_key(char *key)
+{
+	/*
+	 * RFC 1994
+	 */
+	if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
+	    !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
+	    !strcmp(key, "CHAP_R"))
+		return 1;
+
+	/*
+	 * RFC 2945
+	 */
+	if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
+	    !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
+	    !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
+	    !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
+		return 1;
+
+	return 0;
+}
+
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+{
+	if (IS_TYPE_BOOL_AND(param)) {
+		if (!strcmp(param->value, NO))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+	} else if (IS_TYPE_BOOL_OR(param)) {
+		if (!strcmp(param->value, YES))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		 /*
+		  * Required for gPXE iSCSI boot client
+		  */
+		if (!strcmp(param->name, IMMEDIATEDATA))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+	} else if (IS_TYPE_NUMBER(param)) {
+		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		/*
+		 * The GlobalSAN iSCSI Initiator for MacOSX does
+		 * not respond to MaxBurstLength, FirstBurstLength,
+		 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+		 * So, we set them to 'reply optional' here, and assume
+		 * the defaults from iscsi_parameters.h if the initiator
+		 * is not RFC compliant and the keys are not negotiated.
+		 */
+		if (!strcmp(param->name, MAXBURSTLENGTH))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		if (!strcmp(param->name, FIRSTBURSTLENGTH))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		if (!strcmp(param->name, DEFAULTTIME2WAIT))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+		/*
+		 * Required for gPXE iSCSI boot client
+		 */
+		if (!strcmp(param->name, MAXCONNECTIONS))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+	} else if (IS_PHASE_DECLARATIVE(param))
+		SET_PSTATE_REPLY_OPTIONAL(param);
+}
+
+static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
+{
+	if (strcmp(value, YES) && strcmp(value, NO)) {
+		pr_err("Illegal value for \"%s\", must be either"
+			" \"%s\" or \"%s\".\n", param->name, YES, NO);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
+{
+	char *tmpptr;
+	int value = 0;
+
+	value = simple_strtoul(value_ptr, &tmpptr, 0);
+
+/* #warning FIXME: Fix this */
+#if 0
+	if (strspn(tmpptr, WHITE_SPACE) != strlen(tmpptr)) {
+		pr_err("Illegal value \"%s\" for \"%s\".\n",
+			value_ptr, param->name);
+		return -1;
+	}
+#endif
+	if (IS_TYPERANGE_0_TO_2(param)) {
+		if ((value < 0) || (value > 2)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 2.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_3600(param)) {
+		if ((value < 0) || (value > 3600)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 3600.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_32767(param)) {
+		if ((value < 0) || (value > 32767)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 32767.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_0_TO_65535(param)) {
+		if ((value < 0) || (value > 65535)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 0 and 65535.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_1_TO_65535(param)) {
+		if ((value < 1) || (value > 65535)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 1 and 65535.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_2_TO_3600(param)) {
+		if ((value < 2) || (value > 3600)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 2 and 3600.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+	if (IS_TYPERANGE_512_TO_16777215(param)) {
+		if ((value < 512) || (value > 16777215)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" between 512 and 16777215.\n", param->name);
+			return -1;
+		}
+		return 0;
+	}
+
+	return 0;
+}
+
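+/*
+ * Validate a numerical range value of the form "<left>~<right>" (e.g. a
+ * proposed "2048~8192" for IFMarkInt/OFMarkInt), checking both ends,
+ * requiring right >= left, and rejecting ranges that fall below the
+ * locally configured minimum.
+ */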
+static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
+{
+	char *left_val_ptr = NULL, *right_val_ptr = NULL;
+	char *tilde_ptr = NULL;
+	u32 left_val, right_val, local_left_val;
+
+	if (strcmp(param->name, IFMARKINT) &&
+	    strcmp(param->name, OFMARKINT)) {
+		pr_err("Only parameters \"%s\" or \"%s\" may contain a"
+		       " numerical range value.\n", IFMARKINT, OFMARKINT);
+		return -1;
+	}
+
+	if (IS_PSTATE_PROPOSER(param))
+		return 0;
+
+	tilde_ptr = strchr(value, '~');
+	if (!tilde_ptr) {
+		pr_err("Unable to locate numerical range indicator"
+			" \"~\" for \"%s\".\n", param->name);
+		return -1;
+	}
+	*tilde_ptr = '\0';
+
+	left_val_ptr = value;
+	right_val_ptr = value + strlen(left_val_ptr) + 1;
+
+	if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
+		return -1;
+	if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
+		return -1;
+
+	left_val = simple_strtoul(left_val_ptr, NULL, 0);
+	right_val = simple_strtoul(right_val_ptr, NULL, 0);
+	*tilde_ptr = '~';
+
+	if (right_val < left_val) {
+		pr_err("Numerical range for parameter \"%s\" contains"
+			" a right value which is less than the left.\n",
+				param->name);
+		return -1;
+	}
+
+	/*
+	 * For now, enforce reasonable defaults for [I,O]FMarkInt.
+	 */
+	tilde_ptr = strchr(param->value, '~');
+	if (!tilde_ptr) {
+		pr_err("Unable to locate numerical range indicator"
+			" \"~\" for \"%s\".\n", param->name);
+		return -1;
+	}
+	*tilde_ptr = '\0';
+
+	left_val_ptr = param->value;
+	right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+
+	local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
+	*tilde_ptr = '~';
+
+	if (param->set_param) {
+		if ((left_val < local_left_val) ||
+		    (right_val < local_left_val)) {
+			pr_err("Passed value range \"%u~%u\" is below"
+				" minimum left value \"%u\" for key \"%s\","
+				" rejecting.\n", left_val, right_val,
+				local_left_val, param->name);
+			return -1;
+		}
+	} else {
+		if ((left_val < local_left_val) &&
+		    (right_val < local_left_val)) {
+			pr_err("Received value range \"%u~%u\" is"
+				" below minimum left value \"%u\" for key"
+				" \"%s\", rejecting.\n", left_val, right_val,
+				local_left_val, param->name);
+			SET_PSTATE_REJECT(param);
+			if (iscsi_update_param_value(param, REJECT) < 0)
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
+{
+	if (IS_PSTATE_PROPOSER(param))
+		return 0;
+
+	if (IS_TYPERANGE_AUTH_PARAM(param)) {
+		if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
+		    strcmp(value, SPKM2) && strcmp(value, SRP) &&
+		    strcmp(value, CHAP) && strcmp(value, NONE)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
+				" or \"%s\".\n", param->name, KRB5,
+					SPKM1, SPKM2, SRP, CHAP, NONE);
+			return -1;
+		}
+	}
+	if (IS_TYPERANGE_DIGEST_PARAM(param)) {
+		if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\" or \"%s\".\n", param->name,
+					CRC32C, NONE);
+			return -1;
+		}
+	}
+	if (IS_TYPERANGE_SESSIONTYPE(param)) {
+		if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
+			pr_err("Illegal value for \"%s\", must be"
+				" \"%s\" or \"%s\".\n", param->name,
+					DISCOVERY, NORMAL);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *	Pick a value from a numerical range reply; currently this just
+ *	returns the lesser of the two right-hand values.
+ */
+static char *iscsi_get_value_from_number_range(
+	struct iscsi_param *param,
+	char *value)
+{
+	char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
+	u32 acceptor_right_value, proposer_right_value;
+
+	tilde_ptr1 = strchr(value, '~');
+	if (!tilde_ptr1)
+		return NULL;
+	*tilde_ptr1++ = '\0';
+	proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
+
+	tilde_ptr2 = strchr(param->value, '~');
+	if (!tilde_ptr2)
+		return NULL;
+	*tilde_ptr2++ = '\0';
+	acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
+
+	return (acceptor_right_value >= proposer_right_value) ?
+		tilde_ptr1 : tilde_ptr2;
+}
+
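+/*
+ * Walk the proposer's comma separated value list in order and return the
+ * first entry that also appears in the acceptor's list, or NULL when the
+ * lists share no value.  For example, a proposed "CRC32C,None" checked
+ * against a local "None" selects "None".
+ */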
+static char *iscsi_check_valuelist_for_support(
+	struct iscsi_param *param,
+	char *value)
+{
+	char *tmp1 = NULL, *tmp2 = NULL;
+	char *acceptor_values = NULL, *proposer_values = NULL;
+
+	acceptor_values = param->value;
+	proposer_values = value;
+
+	do {
+		if (!proposer_values)
+			return NULL;
+		tmp1 = strchr(proposer_values, ',');
+		if (tmp1)
+			*tmp1 = '\0';
+		acceptor_values = param->value;
+		do {
+			if (!acceptor_values) {
+				if (tmp1)
+					*tmp1 = ',';
+				return NULL;
+			}
+			tmp2 = strchr(acceptor_values, ',');
+			if (tmp2)
+				*tmp2 = '\0';
+			if (!acceptor_values || !proposer_values) {
+				if (tmp1)
+					*tmp1 = ',';
+				if (tmp2)
+					*tmp2 = ',';
+				return NULL;
+			}
+			if (!strcmp(acceptor_values, proposer_values)) {
+				if (tmp2)
+					*tmp2 = ',';
+				goto out;
+			}
+			if (tmp2)
+				*tmp2++ = ',';
+
+			acceptor_values = tmp2;
+			if (!acceptor_values)
+				break;
+		} while (acceptor_values);
+		if (tmp1)
+			*tmp1++ = ',';
+		proposer_values = tmp1;
+	} while (proposer_values);
+
+out:
+	return proposer_values;
+}
+
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
+{
+	u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
+	char *negoitated_value = NULL;
+
+	if (IS_PSTATE_ACCEPTOR(param)) {
+		pr_err("Received key \"%s\" twice, protocol error.\n",
+				param->name);
+		return -1;
+	}
+
+	if (IS_PSTATE_REJECT(param))
+		return 0;
+
+	if (IS_TYPE_BOOL_AND(param)) {
+		if (!strcmp(value, YES))
+			proposer_boolean_value = 1;
+		if (!strcmp(param->value, YES))
+			acceptor_boolean_value = 1;
+		if (!acceptor_boolean_value || !proposer_boolean_value) {
+			if (iscsi_update_param_value(param, NO) < 0)
+				return -1;
+			if (!proposer_boolean_value)
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_TYPE_BOOL_OR(param)) {
+		if (!strcmp(value, YES))
+			proposer_boolean_value = 1;
+		if (!strcmp(param->value, YES))
+			acceptor_boolean_value = 1;
+		if (acceptor_boolean_value || proposer_boolean_value) {
+			if (iscsi_update_param_value(param, YES) < 0)
+				return -1;
+			if (proposer_boolean_value)
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
+	} else if (IS_TYPE_NUMBER(param)) {
+		char *tmpptr, buf[10];
+		u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+		u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+		memset(buf, 0, 10);
+
+		if (!strcmp(param->name, MAXCONNECTIONS) ||
+		    !strcmp(param->name, MAXBURSTLENGTH) ||
+		    !strcmp(param->name, FIRSTBURSTLENGTH) ||
+		    !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+		    !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+		    !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			if (proposer_value > acceptor_value) {
+				sprintf(buf, "%u", acceptor_value);
+				if (iscsi_update_param_value(param,
+						&buf[0]) < 0)
+					return -1;
+			} else {
+				if (iscsi_update_param_value(param, value) < 0)
+					return -1;
+			}
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			if (acceptor_value > proposer_value) {
+				sprintf(buf, "%u", acceptor_value);
+				if (iscsi_update_param_value(param,
+						&buf[0]) < 0)
+					return -1;
+			} else {
+				if (iscsi_update_param_value(param, value) < 0)
+					return -1;
+			}
+		} else {
+			if (iscsi_update_param_value(param, value) < 0)
+				return -1;
+		}
+
+		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+			SET_PSTATE_REPLY_OPTIONAL(param);
+	} else if (IS_TYPE_NUMBER_RANGE(param)) {
+		negoitated_value = iscsi_get_value_from_number_range(
+					param, value);
+		if (!negoitated_value)
+			return -1;
+		if (iscsi_update_param_value(param, negoitated_value) < 0)
+			return -1;
+	} else if (IS_TYPE_VALUE_LIST(param)) {
+		negoitated_value = iscsi_check_valuelist_for_support(
+					param, value);
+		if (!negoitated_value) {
+			pr_err("Proposer's value list \"%s\" contains"
+				" no valid values from Acceptor's value list"
+				" \"%s\".\n", value, param->value);
+			return -1;
+		}
+		if (iscsi_update_param_value(param, negoitated_value) < 0)
+			return -1;
+	} else if (IS_PHASE_DECLARATIVE(param)) {
+		if (iscsi_update_param_value(param, value) < 0)
+			return -1;
+		SET_PSTATE_REPLY_OPTIONAL(param);
+	}
+
+	return 0;
+}
+
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+	if (IS_PSTATE_RESPONSE_GOT(param)) {
+		pr_err("Received key \"%s\" twice, protocol error.\n",
+				param->name);
+		return -1;
+	}
+
+	if (IS_TYPE_NUMBER_RANGE(param)) {
+		u32 left_val = 0, right_val = 0, recieved_value = 0;
+		char *left_val_ptr = NULL, *right_val_ptr = NULL;
+		char *tilde_ptr = NULL;
+
+		if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
+			if (iscsi_update_param_value(param, value) < 0)
+				return -1;
+			return 0;
+		}
+
+		tilde_ptr = strchr(value, '~');
+		if (tilde_ptr) {
+			pr_err("Illegal \"~\" in response for \"%s\".\n",
+					param->name);
+			return -1;
+		}
+		tilde_ptr = strchr(param->value, '~');
+		if (!tilde_ptr) {
+			pr_err("Unable to locate numerical range"
+				" indicator \"~\" for \"%s\".\n", param->name);
+			return -1;
+		}
+		*tilde_ptr = '\0';
+
+		left_val_ptr = param->value;
+		right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+		left_val = simple_strtoul(left_val_ptr, NULL, 0);
+		right_val = simple_strtoul(right_val_ptr, NULL, 0);
+		recieved_value = simple_strtoul(value, NULL, 0);
+
+		*tilde_ptr = '~';
+
+		if ((recieved_value < left_val) ||
+		    (recieved_value > right_val)) {
+			pr_err("Illegal response \"%s=%u\", value must"
+				" be between %u and %u.\n", param->name,
+				recieved_value, left_val, right_val);
+			return -1;
+		}
+	} else if (IS_TYPE_VALUE_LIST(param)) {
+		char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+		comma_ptr = strchr(value, ',');
+		if (comma_ptr) {
+			pr_err("Illegal \",\" in response for \"%s\".\n",
+					param->name);
+			return -1;
+		}
+
+		tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+		if (!tmp_ptr)
+			return -1;
+	}
+
+	if (iscsi_update_param_value(param, value) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int iscsi_check_value(struct iscsi_param *param, char *value)
+{
+	char *comma_ptr = NULL;
+
+	if (!strcmp(value, REJECT)) {
+		if (!strcmp(param->name, IFMARKINT) ||
+		    !strcmp(param->name, OFMARKINT)) {
+			/*
+			 * Reject is not fatal for [I,O]FMarkInt,  and causes
+			 * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
+			 */
+			SET_PSTATE_REJECT(param);
+			return 0;
+		}
+		pr_err("Received %s=%s\n", param->name, value);
+		return -1;
+	}
+	if (!strcmp(value, IRRELEVANT)) {
+		pr_debug("Received %s=%s\n", param->name, value);
+		SET_PSTATE_IRRELEVANT(param);
+		return 0;
+	}
+	if (!strcmp(value, NOTUNDERSTOOD)) {
+		if (!IS_PSTATE_PROPOSER(param)) {
+			pr_err("Received illegal offer %s=%s\n",
+				param->name, value);
+			return -1;
+		}
+
+/* #warning FIXME: Add check for X-ExtensionKey here */
+		pr_err("Standard iSCSI key \"%s\" cannot be answered"
+			" with \"%s\", protocol error.\n", param->name, value);
+		return -1;
+	}
+
+	do {
+		comma_ptr = NULL;
+		comma_ptr = strchr(value, ',');
+
+		if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
+			pr_err("Detected value separator \",\", but"
+				" key \"%s\" does not allow a value list,"
+				" protocol error.\n", param->name);
+			return -1;
+		}
+		if (comma_ptr)
+			*comma_ptr = '\0';
+
+		if (strlen(value) > VALUE_MAXLEN) {
+			pr_err("Value for key \"%s\" exceeds %d,"
+				" protocol error.\n", param->name,
+				VALUE_MAXLEN);
+			return -1;
+		}
+
+		if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
+			if (iscsi_check_boolean_value(param, value) < 0)
+				return -1;
+		} else if (IS_TYPE_NUMBER(param)) {
+			if (iscsi_check_numerical_value(param, value) < 0)
+				return -1;
+		} else if (IS_TYPE_NUMBER_RANGE(param)) {
+			if (iscsi_check_numerical_range_value(param, value) < 0)
+				return -1;
+		} else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
+			if (iscsi_check_string_or_list_value(param, value) < 0)
+				return -1;
+		} else {
+			pr_err("Huh? 0x%02x\n", param->type);
+			return -1;
+		}
+
+		if (comma_ptr)
+			*comma_ptr++ = ',';
+
+		value = comma_ptr;
+	} while (value);
+
+	return 0;
+}
+
+static struct iscsi_param *__iscsi_check_key(
+	char *key,
+	int sender,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+
+	if (strlen(key) > KEY_MAXLEN) {
+		pr_err("Length of key name \"%s\" exceeds %d.\n",
+			key, KEY_MAXLEN);
+		return NULL;
+	}
+
+	param = iscsi_find_param_from_key(key, param_list);
+	if (!param)
+		return NULL;
+
+	if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "target" : "initiator");
+		return NULL;
+	}
+
+	if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "initiator" : "target");
+		return NULL;
+	}
+
+	return param;
+}
+
+static struct iscsi_param *iscsi_check_key(
+	char *key,
+	int phase,
+	int sender,
+	struct iscsi_param_list *param_list)
+{
+	struct iscsi_param *param;
+	/*
+	 * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
+	 */
+	if (strlen(key) > KEY_MAXLEN) {
+		pr_err("Length of key name \"%s\" exceeds %d.\n",
+			key, KEY_MAXLEN);
+		return NULL;
+	}
+
+	param = iscsi_find_param_from_key(key, param_list);
+	if (!param)
+		return NULL;
+
+	if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+			" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "target" : "initiator");
+		return NULL;
+	}
+	if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+		pr_err("Key \"%s\" may not be sent to %s,"
+				" protocol error.\n", param->name,
+			(sender & SENDER_RECEIVER) ? "initiator" : "target");
+		return NULL;
+	}
+
+	if (IS_PSTATE_ACCEPTOR(param)) {
+		pr_err("Key \"%s\" received twice, protocol error.\n",
+				key);
+		return NULL;
+	}
+
+	if (!phase)
+		return param;
+
+	if (!(param->phase & phase)) {
+		pr_err("Key \"%s\" may not be negotiated during ",
+				param->name);
+		switch (phase) {
+		case PHASE_SECURITY:
+			pr_debug("Security phase.\n");
+			break;
+		case PHASE_OPERATIONAL:
+			pr_debug("Operational phase.\n");
+			break;
+		default:
+			pr_debug("Unknown phase.\n");
+		}
+		return NULL;
+	}
+
+	return param;
+}
+
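+/*
+ * Apply cross-key consistency rules after negotiation: clamp
+ * MaxOutstandingR2T and MaxConnections back to 1 where required, cap
+ * FirstBurstLength at MaxBurstLength, and reset the marker keys when
+ * marking was rejected or disabled.
+ */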
+static int iscsi_enforce_integrity_rules(
+	u8 phase,
+	struct iscsi_param_list *param_list)
+{
+	char *tmpptr;
+	u8 DataSequenceInOrder = 0;
+	u8 ErrorRecoveryLevel = 0, SessionType = 0;
+	u8 IFMarker = 0, OFMarker = 0;
+	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
+	u32 FirstBurstLength = 0, MaxBurstLength = 0;
+	struct iscsi_param *param = NULL;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->phase & phase))
+			continue;
+		if (!strcmp(param->name, SESSIONTYPE))
+			if (!strcmp(param->value, NORMAL))
+				SessionType = 1;
+		if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+			ErrorRecoveryLevel = simple_strtoul(param->value,
+					&tmpptr, 0);
+		if (!strcmp(param->name, DATASEQUENCEINORDER))
+			if (!strcmp(param->value, YES))
+				DataSequenceInOrder = 1;
+		if (!strcmp(param->name, MAXBURSTLENGTH))
+			MaxBurstLength = simple_strtoul(param->value,
+					&tmpptr, 0);
+		if (!strcmp(param->name, IFMARKER))
+			if (!strcmp(param->value, YES))
+				IFMarker = 1;
+		if (!strcmp(param->name, OFMARKER))
+			if (!strcmp(param->value, YES))
+				OFMarker = 1;
+		if (!strcmp(param->name, IFMARKINT))
+			if (!strcmp(param->value, REJECT))
+				IFMarkInt_Reject = 1;
+		if (!strcmp(param->name, OFMARKINT))
+			if (!strcmp(param->value, REJECT))
+				OFMarkInt_Reject = 1;
+	}
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->phase & phase))
+			continue;
+		if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
+		     (strcmp(param->name, IFMARKER) &&
+		      strcmp(param->name, OFMARKER) &&
+		      strcmp(param->name, IFMARKINT) &&
+		      strcmp(param->name, OFMARKINT))))
+			continue;
+		if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
+		    DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
+			if (strcmp(param->value, "1")) {
+				if (iscsi_update_param_value(param, "1") < 0)
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+		if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
+			if (strcmp(param->value, "1")) {
+				if (iscsi_update_param_value(param, "1") < 0)
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+		if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			FirstBurstLength = simple_strtoul(param->value,
+					&tmpptr, 0);
+			if (FirstBurstLength > MaxBurstLength) {
+				char tmpbuf[10];
+				memset(tmpbuf, 0, 10);
+				sprintf(tmpbuf, "%u", MaxBurstLength);
+				if (iscsi_update_param_value(param, tmpbuf))
+					return -1;
+				pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+			}
+		}
+		if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
+			if (iscsi_update_param_value(param, NO) < 0)
+				return -1;
+			IFMarker = 0;
+			pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+		}
+		if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
+			if (iscsi_update_param_value(param, NO) < 0)
+				return -1;
+			OFMarker = 0;
+			pr_debug("Reset \"%s\" to \"%s\".\n",
+					 param->name, param->value);
+		}
+		if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
+			if (!strcmp(param->value, REJECT))
+				continue;
+			param->state &= ~PSTATE_NEGOTIATE;
+			if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+				return -1;
+			pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+		}
+		if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
+			if (!strcmp(param->value, REJECT))
+				continue;
+			param->state &= ~PSTATE_NEGOTIATE;
+			if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+				return -1;
+			pr_debug("Reset \"%s\" to \"%s\".\n",
+					param->name, param->value);
+		}
+	}
+
+	return 0;
+}
+
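+/*
+ * Parse a login/text data segment consisting of NUL terminated
+ * "Key=Value" pairs (e.g. "InitiatorName=iqn.1994-05.com.example:initiator"
+ * followed by "SessionType=Discovery"), checking each key against the
+ * parameter list and updating proposer/acceptor state as it goes.
+ */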
+int iscsi_decode_text_input(
+	u8 phase,
+	u8 sender,
+	char *textbuf,
+	u32 length,
+	struct iscsi_param_list *param_list)
+{
+	char *tmpbuf, *start = NULL, *end = NULL;
+
+	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+	if (!tmpbuf) {
+		pr_err("Unable to allocate memory for tmpbuf.\n");
+		return -1;
+	}
+
+	memcpy(tmpbuf, textbuf, length);
+	tmpbuf[length] = '\0';
+	start = tmpbuf;
+	end = (start + length);
+
+	while (start < end) {
+		char *key, *value;
+		struct iscsi_param *param;
+
+		if (iscsi_extract_key_value(start, &key, &value) < 0) {
+			kfree(tmpbuf);
+			return -1;
+		}
+
+		pr_debug("Got key: %s=%s\n", key, value);
+
+		if (phase & PHASE_SECURITY) {
+			if (iscsi_check_for_auth_key(key) > 0) {
+				kfree(tmpbuf);
+				return 1;
+			}
+		}
+
+		param = iscsi_check_key(key, phase, sender, param_list);
+		if (!param) {
+			if (iscsi_add_notunderstood_response(key,
+					value, param_list) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			start += strlen(key) + strlen(value) + 2;
+			continue;
+		}
+		if (iscsi_check_value(param, value) < 0) {
+			kfree(tmpbuf);
+			return -1;
+		}
+
+		start += strlen(key) + strlen(value) + 2;
+
+		if (IS_PSTATE_PROPOSER(param)) {
+			if (iscsi_check_proposer_state(param, value) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			SET_PSTATE_RESPONSE_GOT(param);
+		} else {
+			if (iscsi_check_acceptor_state(param, value) < 0) {
+				kfree(tmpbuf);
+				return -1;
+			}
+			SET_PSTATE_ACCEPTOR(param);
+		}
+	}
+
+	kfree(tmpbuf);
+	return 0;
+}
+
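+/*
+ *	Emit the outgoing key=value pairs for this negotiation phase into
+ *	textbuf: responses for keys we accepted but have not yet answered,
+ *	new proposals for keys still to be negotiated, and any queued
+ *	NotUnderstood extra responses.  *length is advanced past each
+ *	NUL-terminated pair.
+ */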
+int iscsi_encode_text_output(
+	u8 phase,
+	u8 sender,
+	char *textbuf,
+	u32 *length,
+	struct iscsi_param_list *param_list)
+{
+	char *output_buf = NULL;
+	struct iscsi_extra_response *er;
+	struct iscsi_param *param;
+
+	output_buf = textbuf + *length;
+
+	if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
+		return -1;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!(param->sender & sender))
+			continue;
+		if (IS_PSTATE_ACCEPTOR(param) &&
+		    !IS_PSTATE_RESPONSE_SENT(param) &&
+		    !IS_PSTATE_REPLY_OPTIONAL(param) &&
+		    (param->phase & phase)) {
+			*length += sprintf(output_buf, "%s=%s",
+				param->name, param->value);
+			*length += 1;
+			output_buf = textbuf + *length;
+			SET_PSTATE_RESPONSE_SENT(param);
+			pr_debug("Sending key: %s=%s\n",
+				param->name, param->value);
+			continue;
+		}
+		if (IS_PSTATE_NEGOTIATE(param) &&
+		    !IS_PSTATE_ACCEPTOR(param) &&
+		    !IS_PSTATE_PROPOSER(param) &&
+		    (param->phase & phase)) {
+			*length += sprintf(output_buf, "%s=%s",
+				param->name, param->value);
+			*length += 1;
+			output_buf = textbuf + *length;
+			SET_PSTATE_PROPOSER(param);
+			iscsi_check_proposer_for_optional_reply(param);
+			pr_debug("Sending key: %s=%s\n",
+				param->name, param->value);
+		}
+	}
+
+	list_for_each_entry(er, &param_list->extra_response_list, er_list) {
+		*length += sprintf(output_buf, "%s=%s", er->key, er->value);
+		*length += 1;
+		output_buf = textbuf + *length;
+		pr_debug("Sending key: %s=%s\n", er->key, er->value);
+	}
+	iscsi_release_extra_responses(param_list);
+
+	return 0;
+}
+
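+/*
+ *	Verify that every non-declarative key we proposed (and that does not
+ *	allow an optional reply) received a response; returns -1 otherwise.
+ */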
+int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
+{
+	int ret = 0;
+	struct iscsi_param *param;
+
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (IS_PSTATE_NEGOTIATE(param) &&
+		    IS_PSTATE_PROPOSER(param) &&
+		    !IS_PSTATE_RESPONSE_GOT(param) &&
+		    !IS_PSTATE_REPLY_OPTIONAL(param) &&
+		    !IS_PHASE_DECLARATIVE(param)) {
+			pr_err("No response for proposed key \"%s\".\n",
+					param->name);
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
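+/*
+ *	Update a single parameter from a "Key=Value" string.  When check_key
+ *	is set the key is located with full phase/sender checking and the new
+ *	value is validated before the parameter is updated.
+ */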
+int iscsi_change_param_value(
+	char *keyvalue,
+	struct iscsi_param_list *param_list,
+	int check_key)
+{
+	char *key = NULL, *value = NULL;
+	struct iscsi_param *param;
+	int sender = 0;
+
+	if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
+		return -1;
+
+	if (!check_key) {
+		param = __iscsi_check_key(keyvalue, sender, param_list);
+		if (!param)
+			return -1;
+	} else {
+		param = iscsi_check_key(keyvalue, 0, sender, param_list);
+		if (!param)
+			return -1;
+
+		param->set_param = 1;
+		if (iscsi_check_value(param, value) < 0) {
+			param->set_param = 0;
+			return -1;
+		}
+		param->set_param = 0;
+	}
+
+	if (iscsi_update_param_value(param, value) < 0)
+		return -1;
+
+	return 0;
+}
+
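+/*
+ *	Copy the negotiated per-connection keys (HeaderDigest, DataDigest,
+ *	MaxRecvDataSegmentLength and the marker keys) into iscsi_conn_ops.
+ */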
+void iscsi_set_connection_parameters(
+	struct iscsi_conn_ops *ops,
+	struct iscsi_param_list *param_list)
+{
+	char *tmpptr;
+	struct iscsi_param *param;
+
+	pr_debug("---------------------------------------------------"
+			"---------------\n");
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+			continue;
+		if (!strcmp(param->name, AUTHMETHOD)) {
+			pr_debug("AuthMethod:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, HEADERDIGEST)) {
+			ops->HeaderDigest = !strcmp(param->value, CRC32C);
+			pr_debug("HeaderDigest:                 %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATADIGEST)) {
+			ops->DataDigest = !strcmp(param->value, CRC32C);
+			pr_debug("DataDigest:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+			ops->MaxRecvDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxRecvDataSegmentLength:     %s\n",
+				param->value);
+		} else if (!strcmp(param->name, OFMARKER)) {
+			ops->OFMarker = !strcmp(param->value, YES);
+			pr_debug("OFMarker:                     %s\n",
+				param->value);
+		} else if (!strcmp(param->name, IFMARKER)) {
+			ops->IFMarker = !strcmp(param->value, YES);
+			pr_debug("IFMarker:                     %s\n",
+				param->value);
+		} else if (!strcmp(param->name, OFMARKINT)) {
+			ops->OFMarkInt =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("OFMarkInt:                    %s\n",
+				param->value);
+		} else if (!strcmp(param->name, IFMARKINT)) {
+			ops->IFMarkInt =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("IFMarkInt:                    %s\n",
+				param->value);
+		}
+	}
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+}
+
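+/*
+ *	Copy the negotiated session-wide keys into iscsi_sess_ops.  Keys that
+ *	only apply to the leading connection (InitiatorName, TargetName) are
+ *	copied only when leading is non-zero.
+ */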
+void iscsi_set_session_parameters(
+	struct iscsi_sess_ops *ops,
+	struct iscsi_param_list *param_list,
+	int leading)
+{
+	char *tmpptr;
+	struct iscsi_param *param;
+
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+	list_for_each_entry(param, &param_list->param_list, p_list) {
+		if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+			continue;
+		if (!strcmp(param->name, INITIATORNAME)) {
+			if (!param->value)
+				continue;
+			if (leading)
+				snprintf(ops->InitiatorName,
+						sizeof(ops->InitiatorName),
+						"%s", param->value);
+			pr_debug("InitiatorName:                %s\n",
+				param->value);
+		} else if (!strcmp(param->name, INITIATORALIAS)) {
+			if (!param->value)
+				continue;
+			snprintf(ops->InitiatorAlias,
+						sizeof(ops->InitiatorAlias),
+						"%s", param->value);
+			pr_debug("InitiatorAlias:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETNAME)) {
+			if (!param->value)
+				continue;
+			if (leading)
+				snprintf(ops->TargetName,
+						sizeof(ops->TargetName),
+						"%s", param->value);
+			pr_debug("TargetName:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETALIAS)) {
+			if (!param->value)
+				continue;
+			snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
+					"%s", param->value);
+			pr_debug("TargetAlias:                  %s\n",
+				param->value);
+		} else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+			ops->TargetPortalGroupTag =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("TargetPortalGroupTag:         %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
+			ops->MaxConnections =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxConnections:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, INITIALR2T)) {
+			ops->InitialR2T = !strcmp(param->value, YES);
+			pr_debug("InitialR2T:                   %s\n",
+				param->value);
+		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
+			ops->ImmediateData = !strcmp(param->value, YES);
+			pr_debug("ImmediateData:                %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+			ops->MaxBurstLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxBurstLength:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+			ops->FirstBurstLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("FirstBurstLength:             %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+			ops->DefaultTime2Wait =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("DefaultTime2Wait:             %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+			ops->DefaultTime2Retain =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("DefaultTime2Retain:           %s\n",
+				param->value);
+		} else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+			ops->MaxOutstandingR2T =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("MaxOutstandingR2T:            %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATAPDUINORDER)) {
+			ops->DataPDUInOrder = !strcmp(param->value, YES);
+			pr_debug("DataPDUInOrder:               %s\n",
+				param->value);
+		} else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+			ops->DataSequenceInOrder = !strcmp(param->value, YES);
+			pr_debug("DataSequenceInOrder:          %s\n",
+				param->value);
+		} else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+			ops->ErrorRecoveryLevel =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("ErrorRecoveryLevel:           %s\n",
+				param->value);
+		} else if (!strcmp(param->name, SESSIONTYPE)) {
+			ops->SessionType = !strcmp(param->value, DISCOVERY);
+			pr_debug("SessionType:                  %s\n",
+				param->value);
+		}
+	}
+	pr_debug("----------------------------------------------------"
+			"--------------\n");
+
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 0000000..83eed65
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,271 @@
+#ifndef ISCSI_PARAMETERS_H
+#define ISCSI_PARAMETERS_H
+
+#include <scsi/iscsi_proto.h>
+
+struct iscsi_extra_response {
+	char key[KEY_MAXLEN];
+	char value[32];
+	struct list_head er_list;
+} ____cacheline_aligned;
+
+struct iscsi_param {
+	char *name;
+	char *value;
+	u8 set_param;
+	u8 phase;
+	u8 scope;
+	u8 sender;
+	u8 type;
+	u8 use;
+	u16 type_range;
+	u32 state;
+	struct list_head p_list;
+} ____cacheline_aligned;
+
+extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
+extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
+extern void iscsi_print_params(struct iscsi_param_list *);
+extern int iscsi_create_default_params(struct iscsi_param_list **);
+extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+			struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+			struct iscsi_param_list *);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+			struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+			struct iscsi_param_list *, int);
+
+#define YES				"Yes"
+#define NO				"No"
+#define ALL				"All"
+#define IRRELEVANT			"Irrelevant"
+#define NONE				"None"
+#define NOTUNDERSTOOD			"NotUnderstood"
+#define REJECT				"Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD			"AuthMethod"
+#define HEADERDIGEST			"HeaderDigest"
+#define DATADIGEST			"DataDigest"
+#define MAXCONNECTIONS			"MaxConnections"
+#define SENDTARGETS			"SendTargets"
+#define TARGETNAME			"TargetName"
+#define INITIATORNAME			"InitiatorName"
+#define TARGETALIAS			"TargetAlias"
+#define INITIATORALIAS			"InitiatorAlias"
+#define TARGETADDRESS			"TargetAddress"
+#define TARGETPORTALGROUPTAG		"TargetPortalGroupTag"
+#define INITIALR2T			"InitialR2T"
+#define IMMEDIATEDATA			"ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH	"MaxRecvDataSegmentLength"
+#define MAXBURSTLENGTH			"MaxBurstLength"
+#define FIRSTBURSTLENGTH		"FirstBurstLength"
+#define DEFAULTTIME2WAIT		"DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN		"DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T		"MaxOutstandingR2T"
+#define DATAPDUINORDER			"DataPDUInOrder"
+#define DATASEQUENCEINORDER		"DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL		"ErrorRecoveryLevel"
+#define SESSIONTYPE			"SessionType"
+#define IFMARKER			"IFMarker"
+#define OFMARKER			"OFMarker"
+#define IFMARKINT			"IFMarkInt"
+#define OFMARKINT			"OFMarkInt"
+#define X_EXTENSIONKEY			"X-com.sbei.version"
+#define X_EXTENSIONKEY_CISCO_NEW	"X-com.cisco.protocol"
+#define X_EXTENSIONKEY_CISCO_OLD	"X-com.cisco.iscsi.draft"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5				"KRB5"
+#define SPKM1				"SPKM1"
+#define SPKM2				"SPKM2"
+#define SRP				"SRP"
+#define CHAP				"CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
+ */
+#define INITIAL_AUTHMETHOD			CHAP
+#define INITIAL_HEADERDIGEST			"CRC32C,None"
+#define INITIAL_DATADIGEST			"CRC32C,None"
+#define INITIAL_MAXCONNECTIONS			"1"
+#define INITIAL_SENDTARGETS			ALL
+#define INITIAL_TARGETNAME			"LIO.Target"
+#define INITIAL_INITIATORNAME			"LIO.Initiator"
+#define INITIAL_TARGETALIAS			"LIO Target"
+#define INITIAL_INITIATORALIAS			"LIO Initiator"
+#define INITIAL_TARGETADDRESS			"0.0.0.0:0000,0"
+#define INITIAL_TARGETPORTALGROUPTAG		"1"
+#define INITIAL_INITIALR2T			YES
+#define INITIAL_IMMEDIATEDATA			YES
+#define INITIAL_MAXRECVDATASEGMENTLENGTH	"8192"
+#define INITIAL_MAXBURSTLENGTH			"262144"
+#define INITIAL_FIRSTBURSTLENGTH		"65536"
+#define INITIAL_DEFAULTTIME2WAIT		"2"
+#define INITIAL_DEFAULTTIME2RETAIN		"20"
+#define INITIAL_MAXOUTSTANDINGR2T		"1"
+#define INITIAL_DATAPDUINORDER			YES
+#define INITIAL_DATASEQUENCEINORDER		YES
+#define INITIAL_ERRORRECOVERYLEVEL		"0"
+#define INITIAL_SESSIONTYPE			NORMAL
+#define INITIAL_IFMARKER			NO
+#define INITIAL_OFMARKER			NO
+#define INITIAL_IFMARKINT			"2048~65535"
+#define INITIAL_OFMARKINT			"2048~65535"
+
+/*
+ * For [Header,Data]Digests.
+ */
+#define CRC32C				"CRC32C"
+
+/*
+ * For SessionType.
+ */
+#define DISCOVERY			"Discovery"
+#define NORMAL				"Normal"
+
+/*
+ * struct iscsi_param->use
+ */
+#define USE_LEADING_ONLY		0x01
+#define USE_INITIAL_ONLY		0x02
+#define USE_ALL				0x04
+
+#define IS_USE_LEADING_ONLY(p)		((p)->use & USE_LEADING_ONLY)
+#define IS_USE_INITIAL_ONLY(p)		((p)->use & USE_INITIAL_ONLY)
+#define IS_USE_ALL(p)			((p)->use & USE_ALL)
+
+#define SET_USE_INITIAL_ONLY(p)		((p)->use |= USE_INITIAL_ONLY)
+
+/*
+ * struct iscsi_param->sender
+ */
+#define	SENDER_INITIATOR		0x01
+#define SENDER_TARGET			0x02
+#define SENDER_BOTH			0x03
+/* Used in iscsi_check_key() */
+#define SENDER_RECEIVER			0x04
+
+#define IS_SENDER_INITIATOR(p)		((p)->sender & SENDER_INITIATOR)
+#define IS_SENDER_TARGET(p)		((p)->sender & SENDER_TARGET)
+#define IS_SENDER_BOTH(p)		((p)->sender & SENDER_BOTH)
+
+/*
+ * struct iscsi_param->scope
+ */
+#define SCOPE_CONNECTION_ONLY		0x01
+#define SCOPE_SESSION_WIDE		0x02
+
+#define IS_SCOPE_CONNECTION_ONLY(p)	((p)->scope & SCOPE_CONNECTION_ONLY)
+#define IS_SCOPE_SESSION_WIDE(p)	((p)->scope & SCOPE_SESSION_WIDE)
+
+/*
+ * struct iscsi_param->phase
+ */
+#define PHASE_SECURITY			0x01
+#define PHASE_OPERATIONAL		0x02
+#define PHASE_DECLARATIVE		0x04
+#define PHASE_FFP0			0x08
+
+#define IS_PHASE_SECURITY(p)		((p)->phase & PHASE_SECURITY)
+#define IS_PHASE_OPERATIONAL(p)		((p)->phase & PHASE_OPERATIONAL)
+#define IS_PHASE_DECLARATIVE(p)		((p)->phase & PHASE_DECLARATIVE)
+#define IS_PHASE_FFP0(p)		((p)->phase & PHASE_FFP0)
+
+/*
+ * struct iscsi_param->type
+ */
+#define TYPE_BOOL_AND			0x01
+#define TYPE_BOOL_OR			0x02
+#define TYPE_NUMBER			0x04
+#define TYPE_NUMBER_RANGE		0x08
+#define TYPE_STRING			0x10
+#define TYPE_VALUE_LIST			0x20
+
+#define IS_TYPE_BOOL_AND(p)		((p)->type & TYPE_BOOL_AND)
+#define IS_TYPE_BOOL_OR(p)		((p)->type & TYPE_BOOL_OR)
+#define IS_TYPE_NUMBER(p)		((p)->type & TYPE_NUMBER)
+#define IS_TYPE_NUMBER_RANGE(p)		((p)->type & TYPE_NUMBER_RANGE)
+#define IS_TYPE_STRING(p)		((p)->type & TYPE_STRING)
+#define IS_TYPE_VALUE_LIST(p)		((p)->type & TYPE_VALUE_LIST)
+
+/*
+ * struct iscsi_param->type_range
+ */
+#define TYPERANGE_BOOL_AND		0x0001
+#define TYPERANGE_BOOL_OR		0x0002
+#define TYPERANGE_0_TO_2		0x0004
+#define TYPERANGE_0_TO_3600		0x0008
+#define TYPERANGE_0_TO_32767		0x0010
+#define TYPERANGE_0_TO_65535		0x0020
+#define TYPERANGE_1_TO_65535		0x0040
+#define TYPERANGE_2_TO_3600		0x0080
+#define TYPERANGE_512_TO_16777215	0x0100
+#define TYPERANGE_AUTH			0x0200
+#define TYPERANGE_DIGEST		0x0400
+#define TYPERANGE_ISCSINAME		0x0800
+#define TYPERANGE_MARKINT		0x1000
+#define TYPERANGE_SESSIONTYPE		0x2000
+#define TYPERANGE_TARGETADDRESS		0x4000
+#define TYPERANGE_UTF8			0x8000
+
+#define IS_TYPERANGE_0_TO_2(p)		((p)->type_range & TYPERANGE_0_TO_2)
+#define IS_TYPERANGE_0_TO_3600(p)	((p)->type_range & TYPERANGE_0_TO_3600)
+#define IS_TYPERANGE_0_TO_32767(p)	((p)->type_range & TYPERANGE_0_TO_32767)
+#define IS_TYPERANGE_0_TO_65535(p)	((p)->type_range & TYPERANGE_0_TO_65535)
+#define IS_TYPERANGE_1_TO_65535(p)	((p)->type_range & TYPERANGE_1_TO_65535)
+#define IS_TYPERANGE_2_TO_3600(p)	((p)->type_range & TYPERANGE_2_TO_3600)
+#define IS_TYPERANGE_512_TO_16777215(p)	((p)->type_range & \
+						TYPERANGE_512_TO_16777215)
+#define IS_TYPERANGE_AUTH_PARAM(p)	((p)->type_range & TYPERANGE_AUTH)
+#define IS_TYPERANGE_DIGEST_PARAM(p)	((p)->type_range & TYPERANGE_DIGEST)
+#define IS_TYPERANGE_SESSIONTYPE(p)	((p)->type_range & \
+						TYPERANGE_SESSIONTYPE)
+
+/*
+ * struct iscsi_param->state
+ */
+#define PSTATE_ACCEPTOR			0x01
+#define PSTATE_NEGOTIATE		0x02
+#define PSTATE_PROPOSER			0x04
+#define PSTATE_IRRELEVANT		0x08
+#define PSTATE_REJECT			0x10
+#define PSTATE_REPLY_OPTIONAL		0x20
+#define PSTATE_RESPONSE_GOT		0x40
+#define PSTATE_RESPONSE_SENT		0x80
+
+#define IS_PSTATE_ACCEPTOR(p)		((p)->state & PSTATE_ACCEPTOR)
+#define IS_PSTATE_NEGOTIATE(p)		((p)->state & PSTATE_NEGOTIATE)
+#define IS_PSTATE_PROPOSER(p)		((p)->state & PSTATE_PROPOSER)
+#define IS_PSTATE_IRRELEVANT(p)		((p)->state & PSTATE_IRRELEVANT)
+#define IS_PSTATE_REJECT(p)		((p)->state & PSTATE_REJECT)
+#define IS_PSTATE_REPLY_OPTIONAL(p)	((p)->state & PSTATE_REPLY_OPTIONAL)
+#define IS_PSTATE_RESPONSE_GOT(p)	((p)->state & PSTATE_RESPONSE_GOT)
+#define IS_PSTATE_RESPONSE_SENT(p)	((p)->state & PSTATE_RESPONSE_SENT)
+
+#define SET_PSTATE_ACCEPTOR(p)		((p)->state |= PSTATE_ACCEPTOR)
+#define SET_PSTATE_NEGOTIATE(p)		((p)->state |= PSTATE_NEGOTIATE)
+#define SET_PSTATE_PROPOSER(p)		((p)->state |= PSTATE_PROPOSER)
+#define SET_PSTATE_IRRELEVANT(p)	((p)->state |= PSTATE_IRRELEVANT)
+#define SET_PSTATE_REJECT(p)		((p)->state |= PSTATE_REJECT)
+#define SET_PSTATE_REPLY_OPTIONAL(p)	((p)->state |= PSTATE_REPLY_OPTIONAL)
+#define SET_PSTATE_RESPONSE_GOT(p)	((p)->state |= PSTATE_RESPONSE_GOT)
+#define SET_PSTATE_RESPONSE_SENT(p)	((p)->state |= PSTATE_RESPONSE_SENT)
+
+#endif /* ISCSI_PARAMETERS_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 0000000..fc69408
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#define OFFLOAD_BUF_SIZE	32768
+
+void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+{
+	int i;
+	struct iscsi_seq *seq;
+
+	pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+			cmd->init_task_tag);
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		seq = &cmd->seq_list[i];
+		pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+			" offset: %d, xfer_len: %d, seq_send_order: %d,"
+			" seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+			seq->offset, seq->xfer_len, seq->seq_send_order,
+			seq->seq_no);
+	}
+}
+
+void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+{
+	int i;
+	struct iscsi_pdu *pdu;
+
+	pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+			cmd->init_task_tag);
+
+	for (i = 0; i < cmd->pdu_count; i++) {
+		pdu = &cmd->pdu_list[i];
+		pr_debug("i: %d, offset: %d, length: %d,"
+			" pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+			pdu->length, pdu->pdu_send_order, pdu->seq_no);
+	}
+}
+
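+/*
+ *	Assign ascending seq_send_order values to the command's NORMAL
+ *	sequences, i.e. deliver the sequences in their original order.
+ */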
+static void iscsit_ordered_seq_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	u32 i, seq_count = 0;
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		cmd->seq_list[i].seq_send_order = seq_count++;
+	}
+}
+
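+/*
+ *	Assign ascending pdu_send_order values to the PDUs of each sequence,
+ *	restarting the counter whenever the sequence number advances.
+ */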
+static void iscsit_ordered_pdu_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	u32 i, pdu_send_order = 0, seq_no = 0;
+
+	for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+		if (cmd->pdu_list[i].seq_no == seq_no) {
+			cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+			continue;
+		}
+		seq_no++;
+		pdu_send_order = 0;
+		goto redo;
+	}
+}
+
+/*
+ *	Generate count random values into array.
+ *	Use 0x80000000 to mark generated values in array[].
+ */
+static void iscsit_create_random_array(u32 *array, u32 count)
+{
+	int i, j, k;
+
+	if (count == 1) {
+		array[0] = 0;
+		return;
+	}
+
+	for (i = 0; i < count; i++) {
+redo:
+		get_random_bytes(&j, sizeof(u32));
+		j = (1 + (int) (9999 + 1) - j) % count;
+		for (k = 0; k < i + 1; k++) {
+			j |= 0x80000000;
+			if ((array[k] & 0x80000000) && (array[k] == j))
+				goto redo;
+		}
+		array[i] = j;
+	}
+
+	for (i = 0; i < count; i++)
+		array[i] &= ~0x80000000;
+}
+
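+/*
+ *	For DataPDUInOrder=No: generate a random pdu_send_order permutation
+ *	for the PDUs belonging to each sequence.
+ */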
+static int iscsit_randomize_pdu_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	int i = 0;
+	u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
+
+	for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
+redo:
+		if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
+			seq_count++;
+			continue;
+		}
+		array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+		if (!array) {
+			pr_err("Unable to allocate memory"
+				" for random array.\n");
+			return -1;
+		}
+		iscsit_create_random_array(array, seq_count);
+
+		for (i = 0; i < seq_count; i++)
+			cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+		kfree(array);
+
+		seq_offset += seq_count;
+		seq_count = 0;
+		seq_no++;
+		goto redo;
+	}
+
+	if (seq_count) {
+		array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+		if (!array) {
+			pr_err("Unable to allocate memory for"
+				" random array.\n");
+			return -1;
+		}
+		iscsit_create_random_array(array, seq_count);
+
+		for (i = 0; i < seq_count; i++)
+			cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+		kfree(array);
+	}
+
+	return 0;
+}
+
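+/*
+ *	For DataSequenceInOrder=No: generate a random seq_send_order
+ *	permutation for the NORMAL sequences, leaving any immediate and
+ *	unsolicited sequences out of the shuffle.
+ */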
+static int iscsit_randomize_seq_lists(
+	struct iscsi_cmd *cmd,
+	u8 type)
+{
+	int i, j = 0;
+	u32 *array, seq_count = cmd->seq_count;
+
+	if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
+		seq_count--;
+	else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
+		seq_count -= 2;
+
+	if (!seq_count)
+		return 0;
+
+	array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+	if (!array) {
+		pr_err("Unable to allocate memory for random array.\n");
+		return -1;
+	}
+	iscsit_create_random_array(array, seq_count);
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		cmd->seq_list[i].seq_send_order = array[j++];
+	}
+
+	kfree(array);
+	return 0;
+}
+
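+/*
+ *	Walk the command's data length using the negotiated
+ *	MaxRecvDataSegmentLength, FirstBurstLength and MaxBurstLength to
+ *	precompute how many sequence and PDU descriptors need to be
+ *	allocated before building the lists.
+ */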
+static void iscsit_determine_counts_for_list(
+	struct iscsi_cmd *cmd,
+	struct iscsi_build_list *bl,
+	u32 *seq_count,
+	u32 *pdu_count)
+{
+	int check_immediate = 0;
+	u32 burstlength = 0, offset = 0;
+	u32 unsolicited_data_length = 0;
+	struct iscsi_conn *conn = cmd->conn;
+
+	if ((bl->type == PDULIST_IMMEDIATE) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		check_immediate = 1;
+
+	if ((bl->type == PDULIST_UNSOLICITED) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		unsolicited_data_length = (cmd->data_length >
+			conn->sess->sess_ops->FirstBurstLength) ?
+			conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+	while (offset < cmd->data_length) {
+		*pdu_count += 1;
+
+		if (check_immediate) {
+			check_immediate = 0;
+			offset += bl->immediate_data_length;
+			*seq_count += 1;
+			if (unsolicited_data_length)
+				unsolicited_data_length -=
+					bl->immediate_data_length;
+			continue;
+		}
+		if (unsolicited_data_length > 0) {
+			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+					>= cmd->data_length) {
+				unsolicited_data_length -=
+					(cmd->data_length - offset);
+				offset += (cmd->data_length - offset);
+				continue;
+			}
+			if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+					>= conn->sess->sess_ops->FirstBurstLength) {
+				unsolicited_data_length -=
+					(conn->sess->sess_ops->FirstBurstLength -
+					offset);
+				offset += (conn->sess->sess_ops->FirstBurstLength -
+					offset);
+				burstlength = 0;
+				*seq_count += 1;
+				continue;
+			}
+
+			offset += conn->conn_ops->MaxRecvDataSegmentLength;
+			unsolicited_data_length -=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			continue;
+		}
+		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+		     cmd->data_length) {
+			offset += (cmd->data_length - offset);
+			continue;
+		}
+		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+		     conn->sess->sess_ops->MaxBurstLength) {
+			offset += (conn->sess->sess_ops->MaxBurstLength -
+					burstlength);
+			burstlength = 0;
+			*seq_count += 1;
+			continue;
+		}
+
+		burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+		offset += conn->conn_ops->MaxRecvDataSegmentLength;
+	}
+}
+
+
+/*
+ *	Builds PDU and/or Sequence list,  called while DataSequenceInOrder=No
+ *	and DataPDUInOrder=No.
+ */
+static int iscsit_build_pdu_and_seq_list(
+	struct iscsi_cmd *cmd,
+	struct iscsi_build_list *bl)
+{
+	int check_immediate = 0, datapduinorder, datasequenceinorder;
+	u32 burstlength = 0, offset = 0, i = 0;
+	u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = cmd->pdu_list;
+	struct iscsi_seq *seq = cmd->seq_list;
+
+	datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
+	datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
+
+	if ((bl->type == PDULIST_IMMEDIATE) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		check_immediate = 1;
+
+	if ((bl->type == PDULIST_UNSOLICITED) ||
+	    (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+		unsolicited_data_length = (cmd->data_length >
+			conn->sess->sess_ops->FirstBurstLength) ?
+			conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+	while (offset < cmd->data_length) {
+		pdu_count++;
+		if (!datapduinorder) {
+			pdu[i].offset = offset;
+			pdu[i].seq_no = seq_no;
+		}
+		if (!datasequenceinorder && (pdu_count == 1)) {
+			seq[seq_no].pdu_start = i;
+			seq[seq_no].seq_no = seq_no;
+			seq[seq_no].offset = offset;
+			seq[seq_no].orig_offset = offset;
+		}
+
+		if (check_immediate) {
+			check_immediate = 0;
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_IMMEDIATE;
+				pdu[i++].length = bl->immediate_data_length;
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_IMMEDIATE;
+				seq[seq_no].pdu_count = 1;
+				seq[seq_no].xfer_len =
+					bl->immediate_data_length;
+			}
+			offset += bl->immediate_data_length;
+			pdu_count = 0;
+			seq_no++;
+			if (unsolicited_data_length)
+				unsolicited_data_length -=
+					bl->immediate_data_length;
+			continue;
+		}
+		if (unsolicited_data_length > 0) {
+			if ((offset +
+			     conn->conn_ops->MaxRecvDataSegmentLength) >=
+			     cmd->data_length) {
+				if (!datapduinorder) {
+					pdu[i].type = PDUTYPE_UNSOLICITED;
+					pdu[i].length =
+						(cmd->data_length - offset);
+				}
+				if (!datasequenceinorder) {
+					seq[seq_no].type = SEQTYPE_UNSOLICITED;
+					seq[seq_no].pdu_count = pdu_count;
+					seq[seq_no].xfer_len = (burstlength +
+						(cmd->data_length - offset));
+				}
+				unsolicited_data_length -=
+						(cmd->data_length - offset);
+				offset += (cmd->data_length - offset);
+				continue;
+			}
+			if ((offset +
+			     conn->conn_ops->MaxRecvDataSegmentLength) >=
+					conn->sess->sess_ops->FirstBurstLength) {
+				if (!datapduinorder) {
+					pdu[i].type = PDUTYPE_UNSOLICITED;
+					pdu[i++].length =
+					   (conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				}
+				if (!datasequenceinorder) {
+					seq[seq_no].type = SEQTYPE_UNSOLICITED;
+					seq[seq_no].pdu_count = pdu_count;
+					seq[seq_no].xfer_len = (burstlength +
+					   (conn->sess->sess_ops->FirstBurstLength -
+						offset));
+				}
+				unsolicited_data_length -=
+					(conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				offset += (conn->sess->sess_ops->FirstBurstLength -
+						offset);
+				burstlength = 0;
+				pdu_count = 0;
+				seq_no++;
+				continue;
+			}
+
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_UNSOLICITED;
+				pdu[i++].length =
+				     conn->conn_ops->MaxRecvDataSegmentLength;
+			}
+			burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+			offset += conn->conn_ops->MaxRecvDataSegmentLength;
+			unsolicited_data_length -=
+				conn->conn_ops->MaxRecvDataSegmentLength;
+			continue;
+		}
+		if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+		     cmd->data_length) {
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_NORMAL;
+				pdu[i].length = (cmd->data_length - offset);
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_NORMAL;
+				seq[seq_no].pdu_count = pdu_count;
+				seq[seq_no].xfer_len = (burstlength +
+					(cmd->data_length - offset));
+			}
+			offset += (cmd->data_length - offset);
+			continue;
+		}
+		if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+		     conn->sess->sess_ops->MaxBurstLength) {
+			if (!datapduinorder) {
+				pdu[i].type = PDUTYPE_NORMAL;
+				pdu[i++].length =
+					(conn->sess->sess_ops->MaxBurstLength -
+						burstlength);
+			}
+			if (!datasequenceinorder) {
+				seq[seq_no].type = SEQTYPE_NORMAL;
+				seq[seq_no].pdu_count = pdu_count;
+				seq[seq_no].xfer_len = (burstlength +
+					(conn->sess->sess_ops->MaxBurstLength -
+					burstlength));
+			}
+			offset += (conn->sess->sess_ops->MaxBurstLength -
+					burstlength);
+			burstlength = 0;
+			pdu_count = 0;
+			seq_no++;
+			continue;
+		}
+
+		if (!datapduinorder) {
+			pdu[i].type = PDUTYPE_NORMAL;
+			pdu[i++].length =
+				conn->conn_ops->MaxRecvDataSegmentLength;
+		}
+		burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+		offset += conn->conn_ops->MaxRecvDataSegmentLength;
+	}
+
+	if (!datasequenceinorder) {
+		if (bl->data_direction & ISCSI_PDU_WRITE) {
+			if (bl->randomize & RANDOM_R2T_OFFSETS) {
+				if (iscsit_randomize_seq_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_seq_lists(cmd, bl->type);
+		} else if (bl->data_direction & ISCSI_PDU_READ) {
+			if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
+				if (iscsit_randomize_seq_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_seq_lists(cmd, bl->type);
+		}
+#if 0
+		iscsit_dump_seq_list(cmd);
+#endif
+	}
+	if (!datapduinorder) {
+		if (bl->data_direction & ISCSI_PDU_WRITE) {
+			if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
+				if (iscsit_randomize_pdu_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_pdu_lists(cmd, bl->type);
+		} else if (bl->data_direction & ISCSI_PDU_READ) {
+			if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
+				if (iscsit_randomize_pdu_lists(cmd, bl->type)
+						< 0)
+					return -1;
+			} else
+				iscsit_ordered_pdu_lists(cmd, bl->type);
+		}
+#if 0
+		iscsit_dump_pdu_list(cmd);
+#endif
+	}
+
+	return 0;
+}
+
+/*
+ *	Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
+ */
+int iscsit_do_build_list(
+	struct iscsi_cmd *cmd,
+	struct iscsi_build_list *bl)
+{
+	u32 pdu_count = 0, seq_count = 1;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_seq *seq = NULL;
+
+	iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+
+	if (!conn->sess->sess_ops->DataSequenceInOrder) {
+		seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+		if (!seq) {
+			pr_err("Unable to allocate struct iscsi_seq list\n");
+			return -1;
+		}
+		cmd->seq_list = seq;
+		cmd->seq_count = seq_count;
+	}
+
+	if (!conn->sess->sess_ops->DataPDUInOrder) {
+		pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+		if (!pdu) {
+			pr_err("Unable to allocate struct iscsi_pdu list.\n");
+			kfree(seq);
+			return -1;
+		}
+		cmd->pdu_list = pdu;
+		cmd->pdu_count = pdu_count;
+	}
+
+	return iscsit_build_pdu_and_seq_list(cmd, bl);
+}
+
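+/*
+ *	Locate the PDU descriptor matching the given offset and length, or
+ *	return NULL if no such descriptor exists.
+ */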
+struct iscsi_pdu *iscsit_get_pdu_holder(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	u32 i;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (!cmd->pdu_list) {
+		pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+		return NULL;
+	}
+
+	pdu = &cmd->pdu_list[0];
+
+	for (i = 0; i < cmd->pdu_count; i++)
+		if ((pdu[i].offset == offset) && (pdu[i].length == length))
+			return &pdu[i];
+
+	pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
+		" %u, Length: %u\n", cmd->init_task_tag, offset, length);
+	return NULL;
+}
+
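+/*
+ *	Return the next PDU descriptor to send according to pdu_send_order.
+ *	With DataSequenceInOrder=Yes the command's own pdu_start/seq_no
+ *	counters are walked; otherwise the supplied sequence is used.
+ */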
+struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
+	struct iscsi_cmd *cmd,
+	struct iscsi_seq *seq)
+{
+	u32 i;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_pdu *pdu = NULL;
+
+	if (!cmd->pdu_list) {
+		pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+		return NULL;
+	}
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+redo:
+		pdu = &cmd->pdu_list[cmd->pdu_start];
+
+		for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
+#if 0
+			pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
+				"_send_order: %d, pdu[i].offset: %d,"
+				" pdu[i].length: %d\n", pdu[i].seq_no,
+				pdu[i].pdu_send_order, pdu[i].offset,
+				pdu[i].length);
+#endif
+			if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
+				cmd->pdu_send_order++;
+				return &pdu[i];
+			}
+		}
+
+		cmd->pdu_start += cmd->pdu_send_order;
+		cmd->pdu_send_order = 0;
+		cmd->seq_no++;
+
+		if (cmd->pdu_start < cmd->pdu_count)
+			goto redo;
+
+		pr_err("Command ITT: 0x%08x unable to locate"
+			" struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
+			cmd->init_task_tag, cmd->pdu_send_order);
+		return NULL;
+	} else {
+		if (!seq) {
+			pr_err("struct iscsi_seq is NULL!\n");
+			return NULL;
+		}
+#if 0
+		pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
+			" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
+			seq->seq_no);
+#endif
+		pdu = &cmd->pdu_list[seq->pdu_start];
+
+		if (seq->pdu_send_order == seq->pdu_count) {
+			pr_err("Command ITT: 0x%08x seq->pdu_send"
+				"_order: %u equals seq->pdu_count: %u\n",
+				cmd->init_task_tag, seq->pdu_send_order,
+				seq->pdu_count);
+			return NULL;
+		}
+
+		for (i = 0; i < seq->pdu_count; i++) {
+			if (pdu[i].pdu_send_order == seq->pdu_send_order) {
+				seq->pdu_send_order++;
+				return &pdu[i];
+			}
+		}
+
+		pr_err("Command ITT: 0x%08x unable to locate iscsi"
+			"_pdu_t for seq->pdu_send_order: %u.\n",
+			cmd->init_task_tag, seq->pdu_send_order);
+		return NULL;
+	}
+
+	return NULL;
+}
+
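+/*
+ *	Locate the Sequence holder for the given offset and length; returns
+ *	NULL if none is found.
+ */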
+struct iscsi_seq *iscsit_get_seq_holder(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	u32 i;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return NULL;
+	}
+
+	for (i = 0; i < cmd->seq_count; i++) {
+#if 0
+		pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
+			"xfer_len: %d, seq_list[i].seq_no %u\n",
+			cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
+			cmd->seq_list[i].seq_no);
+#endif
+		if ((cmd->seq_list[i].orig_offset +
+				cmd->seq_list[i].xfer_len) >=
+				(offset + length))
+			return &cmd->seq_list[i];
+	}
+
+	pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
+		" Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
+		length);
+	return NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 0000000..0d52a10
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
+#ifndef ISCSI_SEQ_AND_PDU_LIST_H
+#define ISCSI_SEQ_AND_PDU_LIST_H
+
+/* struct iscsi_pdu->status */
+#define DATAOUT_PDU_SENT			1
+
+/* struct iscsi_seq->type */
+#define SEQTYPE_IMMEDIATE			1
+#define SEQTYPE_UNSOLICITED			2
+#define SEQTYPE_NORMAL				3
+
+/* struct iscsi_seq->status */
+#define DATAOUT_SEQUENCE_GOT_R2T		1
+#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
+#define DATAOUT_SEQUENCE_COMPLETE		3
+
+/* iscsit_determine_counts_for_list() type */
+#define PDULIST_NORMAL				1
+#define PDULIST_IMMEDIATE			2
+#define PDULIST_UNSOLICITED			3
+#define PDULIST_IMMEDIATE_AND_UNSOLICITED	4
+
+/* struct iscsi_pdu->type */
+#define PDUTYPE_IMMEDIATE			1
+#define PDUTYPE_UNSOLICITED			2
+#define PDUTYPE_NORMAL				3
+
+/* struct iscsi_pdu->status */
+#define ISCSI_PDU_NOT_RECEIVED			0
+#define ISCSI_PDU_RECEIVED_OK			1
+#define ISCSI_PDU_CRC_FAILED			2
+#define ISCSI_PDU_TIMED_OUT			3
+
+/* struct iscsi_build_list->randomize */
+#define RANDOM_DATAIN_PDU_OFFSETS		0x01
+#define RANDOM_DATAIN_SEQ_OFFSETS		0x02
+#define RANDOM_DATAOUT_PDU_OFFSETS		0x04
+#define RANDOM_R2T_OFFSETS			0x08
+
+/* struct iscsi_build_list->data_direction */
+#define ISCSI_PDU_READ				0x01
+#define ISCSI_PDU_WRITE				0x02
+
+struct iscsi_build_list {
+	int		data_direction;
+	int		randomize;
+	int		type;
+	int		immediate_data_length;
+};
+
+struct iscsi_pdu {
+	int		status;
+	int		type;
+	u8		flags;
+	u32		data_sn;
+	u32		length;
+	u32		offset;
+	u32		pdu_send_order;
+	u32		seq_no;
+} ____cacheline_aligned;
+
+struct iscsi_seq {
+	int		sent;
+	int		status;
+	int		type;
+	u32		data_sn;
+	u32		first_datasn;
+	u32		last_datasn;
+	u32		next_burst_len;
+	u32		pdu_start;
+	u32		pdu_count;
+	u32		offset;
+	u32		orig_offset;
+	u32		pdu_send_order;
+	u32		r2t_sn;
+	u32		seq_send_order;
+	u32		seq_no;
+	u32		xfer_len;
+} ____cacheline_aligned;
+
+extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+
+#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 0000000..421d694
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
+/*******************************************************************************
+ * Modern ConfigFS group context specific iSCSI statistics based on original
+ * iscsi_target_mib.c code
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/configfs.h>
+#include <linux/export.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_stat.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* Instance Attributes Table */
+#define ISCSI_INST_NUM_NODES		1
+#define ISCSI_INST_DESCR		"Storage Engine Target"
+#define ISCSI_INST_LAST_FAILURE_TYPE	0
+#define ISCSI_DISCONTINUITY_TIME	0
+
+#define ISCSI_NODE_INDEX		1
+
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+/****************************************************************************
+ * iSCSI MIB Tables
+ ****************************************************************************/
+/*
+ * Instance Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode)			\
+static struct iscsi_stat_instance_attribute			\
+			iscsi_stat_instance_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_instance_show_attr_##_name,			\
+	iscsi_stat_instance_store_attr_##_name);
+
+#define ISCSI_STAT_INSTANCE_ATTR_RO(_name)			\
+static struct iscsi_stat_instance_attribute			\
+			iscsi_stat_instance_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_instance_show_attr_##_name);
+
+static ssize_t iscsi_stat_instance_show_attr_inst(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_instance_show_attr_min_ver(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_max_ver(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_portals(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(portals);
+
+static ssize_t iscsi_stat_instance_show_attr_nodes(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
+
+static ssize_t iscsi_stat_instance_show_attr_sessions(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_sess(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+	u32 sess_err_count;
+
+	spin_lock_bh(&sess_err->lock);
+	sess_err_count = (sess_err->digest_errors +
+			  sess_err->cxn_timeout_errors +
+			  sess_err->pdu_format_errors);
+	spin_unlock_bh(&sess_err->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_type(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			sess_err->last_sess_failure_type);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			sess_err->last_sess_fail_rem_name[0] ?
+			sess_err->last_sess_fail_rem_name : NONE);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
+
+static ssize_t iscsi_stat_instance_show_attr_disc_time(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
+
+static ssize_t iscsi_stat_instance_show_attr_description(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(description);
+
+static ssize_t iscsi_stat_instance_show_attr_vendor(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
+
+static ssize_t iscsi_stat_instance_show_attr_version(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(version);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
+		iscsi_instance_group);
+
+static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
+	&iscsi_stat_instance_inst.attr,
+	&iscsi_stat_instance_min_ver.attr,
+	&iscsi_stat_instance_max_ver.attr,
+	&iscsi_stat_instance_portals.attr,
+	&iscsi_stat_instance_nodes.attr,
+	&iscsi_stat_instance_sessions.attr,
+	&iscsi_stat_instance_fail_sess.attr,
+	&iscsi_stat_instance_fail_type.attr,
+	&iscsi_stat_instance_fail_rem_name.attr,
+	&iscsi_stat_instance_disc_time.attr,
+	&iscsi_stat_instance_description.attr,
+	&iscsi_stat_instance_vendor.attr,
+	&iscsi_stat_instance_version.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_instance_item_ops = {
+	.show_attribute		= iscsi_stat_instance_attr_show,
+	.store_attribute	= iscsi_stat_instance_attr_store,
+};
+
+struct config_item_type iscsi_stat_instance_cit = {
+	.ct_item_ops		= &iscsi_stat_instance_item_ops,
+	.ct_attrs		= iscsi_stat_instance_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Instance Session Failure Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode)			\
+static struct iscsi_stat_sess_err_attribute			\
+			iscsi_stat_sess_err_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_sess_err_show_attr_##_name,			\
+	iscsi_stat_sess_err_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name)			\
+static struct iscsi_stat_sess_err_attribute			\
+			iscsi_stat_sess_err_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_sess_err_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_err_show_attr_inst(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
+		iscsi_sess_err_group);
+
+static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
+	&iscsi_stat_sess_err_inst.attr,
+	&iscsi_stat_sess_err_digest_errors.attr,
+	&iscsi_stat_sess_err_cxn_errors.attr,
+	&iscsi_stat_sess_err_format_errors.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
+	.show_attribute		= iscsi_stat_sess_err_attr_show,
+	.store_attribute	= iscsi_stat_sess_err_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_err_cit = {
+	.ct_item_ops		= &iscsi_stat_sess_err_item_ops,
+	.ct_attrs		= iscsi_stat_sess_err_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_TGT_ATTR(_name, _mode)			\
+static struct iscsi_stat_tgt_attr_attribute			\
+			iscsi_stat_tgt_attr_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_tgt_attr_show_attr_##_name,			\
+	iscsi_stat_tgt_attr_store_attr_##_name);
+
+#define ISCSI_STAT_TGT_ATTR_RO(_name)				\
+static struct iscsi_stat_tgt_attr_attribute			\
+			iscsi_stat_tgt_attr_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_tgt_attr_show_attr_##_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_TGT_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_TGT_ATTR_RO(indx);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 fail_count;
+
+	spin_lock(&lstat->lock);
+	fail_count = (lstat->redirects + lstat->authorize_fails +
+			lstat->authenticate_fails + lstat->negotiate_fails +
+			lstat->other_fails);
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
+}
+ISCSI_STAT_TGT_ATTR_RO(login_fails);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 last_fail_time;
+
+	spin_lock(&lstat->lock);
+	last_fail_time = lstat->last_fail_time ?
+			(u32)(((u32)lstat->last_fail_time -
+				INITIAL_JIFFIES) * 100 / HZ) : 0;
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	u32 last_fail_type;
+
+	spin_lock(&lstat->lock);
+	last_fail_type = lstat->last_fail_type;
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	unsigned char buf[224];
+
+	spin_lock(&lstat->lock);
+	snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+				lstat->last_intr_fail_name : NONE);
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+			struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	unsigned char buf[8];
+
+	spin_lock(&lstat->lock);
+	snprintf(buf, 8, "%s", (lstat->last_intr_fail_ip_family == AF_INET6) ?
+				"ipv6" : "ipv4");
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+			struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	unsigned char buf[32];
+
+	spin_lock(&lstat->lock);
+	if (lstat->last_intr_fail_ip_family == AF_INET6)
+		snprintf(buf, 32, "[%s]", lstat->last_intr_fail_ip_addr);
+	else
+		snprintf(buf, 32, "%s", lstat->last_intr_fail_ip_addr);
+	spin_unlock(&lstat->lock);
+
+	return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
+		iscsi_tgt_attr_group);
+
+static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
+	&iscsi_stat_tgt_attr_inst.attr,
+	&iscsi_stat_tgt_attr_indx.attr,
+	&iscsi_stat_tgt_attr_login_fails.attr,
+	&iscsi_stat_tgt_attr_last_fail_time.attr,
+	&iscsi_stat_tgt_attr_last_fail_type.attr,
+	&iscsi_stat_tgt_attr_fail_intr_name.attr,
+	&iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
+	&iscsi_stat_tgt_attr_fail_intr_addr.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
+	.show_attribute		= iscsi_stat_tgt_attr_attr_show,
+	.store_attribute	= iscsi_stat_tgt_attr_attr_store,
+};
+
+struct config_item_type iscsi_stat_tgt_attr_cit = {
+	.ct_item_ops		= &iscsi_stat_tgt_attr_item_ops,
+	.ct_attrs		= iscsi_stat_tgt_attr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Login Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGIN(_name, _mode)				\
+static struct iscsi_stat_login_attribute			\
+			iscsi_stat_login_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_login_show_attr_##_name,			\
+	iscsi_stat_login_store_attr_##_name);
+
+#define ISCSI_STAT_LOGIN_RO(_name)				\
+static struct iscsi_stat_login_attribute			\
+			iscsi_stat_login_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_login_show_attr_##_name);
+
+static ssize_t iscsi_stat_login_show_attr_inst(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGIN_RO(inst);
+
+static ssize_t iscsi_stat_login_show_attr_indx(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGIN_RO(indx);
+
+static ssize_t iscsi_stat_login_show_attr_accepts(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(accepts);
+
+static ssize_t iscsi_stat_login_show_attr_other_fails(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(other_fails);
+
+static ssize_t iscsi_stat_login_show_attr_redirects(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(redirects);
+
+static ssize_t iscsi_stat_login_show_attr_authorize_fails(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(authorize_fails);
+
+static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(authenticate_fails);
+
+static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+				struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_login_stats *lstat = &tiqn->login_stats;
+	ssize_t ret;
+
+	spin_lock(&lstat->lock);
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
+	spin_unlock(&lstat->lock);
+
+	return ret;
+}
+ISCSI_STAT_LOGIN_RO(negotiate_fails);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
+		iscsi_login_stats_group);
+
+static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
+	&iscsi_stat_login_inst.attr,
+	&iscsi_stat_login_indx.attr,
+	&iscsi_stat_login_accepts.attr,
+	&iscsi_stat_login_other_fails.attr,
+	&iscsi_stat_login_redirects.attr,
+	&iscsi_stat_login_authorize_fails.attr,
+	&iscsi_stat_login_authenticate_fails.attr,
+	&iscsi_stat_login_negotiate_fails.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
+	.show_attribute		= iscsi_stat_login_attr_show,
+	.store_attribute	= iscsi_stat_login_attr_store,
+};
+
+struct config_item_type iscsi_stat_login_cit = {
+	.ct_item_ops		= &iscsi_stat_login_stats_item_ops,
+	.ct_attrs		= iscsi_stat_login_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Target Logout Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGOUT(_name, _mode)				\
+static struct iscsi_stat_logout_attribute			\
+			iscsi_stat_logout_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_logout_show_attr_##_name,			\
+	iscsi_stat_logout_store_attr_##_name);
+
+#define ISCSI_STAT_LOGOUT_RO(_name)				\
+static struct iscsi_stat_logout_attribute			\
+			iscsi_stat_logout_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_logout_show_attr_##_name);
+
+static ssize_t iscsi_stat_logout_show_attr_inst(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+			struct iscsi_tiqn, tiqn_stat_grps);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGOUT_RO(inst);
+
+static ssize_t iscsi_stat_logout_show_attr_indx(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGOUT_RO(indx);
+
+static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+			struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(normal_logouts);
+
+static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
+	struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+	struct iscsi_tiqn *tiqn = container_of(igrps,
+			struct iscsi_tiqn, tiqn_stat_grps);
+	struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
+		iscsi_logout_stats_group);
+
+static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
+	&iscsi_stat_logout_inst.attr,
+	&iscsi_stat_logout_indx.attr,
+	&iscsi_stat_logout_normal_logouts.attr,
+	&iscsi_stat_logout_abnormal_logouts.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
+	.show_attribute		= iscsi_stat_logout_attr_show,
+	.store_attribute	= iscsi_stat_logout_attr_store,
+};
+
+struct config_item_type iscsi_stat_logout_cit = {
+	.ct_item_ops		= &iscsi_stat_logout_stats_item_ops,
+	.ct_attrs		= iscsi_stat_logout_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Session Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
+#define ISCSI_STAT_SESS(_name, _mode)				\
+static struct iscsi_stat_sess_attribute				\
+			iscsi_stat_sess_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,				\
+	iscsi_stat_sess_show_attr_##_name,			\
+	iscsi_stat_sess_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_RO(_name)				\
+static struct iscsi_stat_sess_attribute				\
+			iscsi_stat_sess_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,				\
+	iscsi_stat_sess_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_show_attr_inst(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
+	struct iscsi_tiqn *tiqn = container_of(wwn,
+			struct iscsi_tiqn, tiqn_wwn);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_RO(inst);
+
+static ssize_t iscsi_stat_sess_show_attr_node(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+				sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(node);
+
+static ssize_t iscsi_stat_sess_show_attr_indx(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+					sess->session_index);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(indx);
+
+static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(cmd_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(rsp_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%llu\n",
+				(unsigned long long)sess->tx_data_octets);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(txdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%llu\n",
+				(unsigned long long)sess->rx_data_octets);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(rxdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+					sess->conn_digest_errors);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(conn_digest_errors);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
+	struct iscsi_node_stat_grps *igrps, char *page)
+{
+	struct iscsi_node_acl *acl = container_of(igrps,
+			struct iscsi_node_acl, node_stat_grps);
+	struct se_node_acl *se_nacl = &acl->se_node_acl;
+	struct iscsi_session *sess;
+	struct se_session *se_sess;
+	ssize_t ret = 0;
+
+	spin_lock_bh(&se_nacl->nacl_sess_lock);
+	se_sess = se_nacl->nacl_sess;
+	if (se_sess) {
+		sess = se_sess->fabric_sess_ptr;
+		if (sess)
+			ret = snprintf(page, PAGE_SIZE, "%u\n",
+					sess->conn_timeout_errors);
+	}
+	spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+	return ret;
+}
+ISCSI_STAT_SESS_RO(conn_timeout_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
+		iscsi_sess_stats_group);
+
+static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
+	&iscsi_stat_sess_inst.attr,
+	&iscsi_stat_sess_node.attr,
+	&iscsi_stat_sess_indx.attr,
+	&iscsi_stat_sess_cmd_pdus.attr,
+	&iscsi_stat_sess_rsp_pdus.attr,
+	&iscsi_stat_sess_txdata_octs.attr,
+	&iscsi_stat_sess_rxdata_octs.attr,
+	&iscsi_stat_sess_conn_digest_errors.attr,
+	&iscsi_stat_sess_conn_timeout_errors.attr,
+	NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
+	.show_attribute		= iscsi_stat_sess_attr_show,
+	.store_attribute	= iscsi_stat_sess_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_cit = {
+	.ct_item_ops		= &iscsi_stat_sess_stats_item_ops,
+	.ct_attrs		= iscsi_stat_sess_stats_attrs,
+	.ct_owner		= THIS_MODULE,
+};
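
Every show routine above follows the same recipe: configfs hands in a pointer to the embedded stat-group member, container_of() walks back to the enclosing struct iscsi_tiqn (or struct iscsi_node_acl for the session table), and a single counter is formatted into the page buffer, taking the stats spinlock only where a consistent snapshot matters. Below is a minimal user-space sketch of that container_of step; it is not part of the patch and uses simplified stand-in struct names.

/* Minimal user-space sketch (not part of the patch) of the container_of
 * pattern the show routines above rely on.  Struct and field names here are
 * simplified stand-ins, not the kernel definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct stat_grps { int dummy; };

struct tiqn {
	unsigned int index;
	struct stat_grps grps;	/* embedded member handed to configfs */
};

static unsigned int show_inst(struct stat_grps *grps)
{
	/* Recover the enclosing tiqn from the embedded member pointer,
	 * as the iscsi_stat_*_show_attr_inst() helpers above do. */
	struct tiqn *t = container_of(grps, struct tiqn, grps);

	return t->index;
}

int main(void)
{
	struct tiqn t = { .index = 7 };

	printf("%u\n", show_inst(&t.grps));	/* prints 7 */
	return 0;
}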
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 0000000..3ff76b4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN		0
+#define ISCSI_SESS_ERR_DIGEST		1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT	2
+#define ISCSI_SESS_ERR_PDU_FORMAT	3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+	spinlock_t	lock;
+	u32		digest_errors;
+	u32		cxn_timeout_errors;
+	u32		pdu_format_errors;
+	u32		last_sess_failure_type;
+	char		last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER		2
+#define ISCSI_LOGIN_FAIL_REDIRECT	3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE	4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE	5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE	6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+	spinlock_t	lock;
+	u32		accepts;
+	u32		other_fails;
+	u32		redirects;
+	u32		authorize_fails;
+	u32		authenticate_fails;
+	u32		negotiate_fails;	/* used for notifications */
+	u64		last_fail_time;		/* time stamp (jiffies) */
+	u32		last_fail_type;
+	int		last_intr_fail_ip_family;
+	unsigned char	last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+	char		last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+	spinlock_t	lock;
+	u32		normal_logouts;
+	u32		abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif   /*** ISCSI_TARGET_STAT_H ***/
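
The counters declared here surface as read-only files under the target's configfs tree. A hypothetical user-space reader might look like the sketch below; the mount point, the fabric_statistics/iscsi_login_stats group path, and the example IQN are assumptions based on the usual LIO layout, so adjust them for a real target.

/* Hypothetical reader for one of the login-stat attributes exported above.
 * The configfs path layout and IQN are assumptions, not taken from the patch. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/config/target/iscsi/"
		"iqn.2003-01.org.example:target0/fabric_statistics/"
		"iscsi_login_stats/accepts";	/* assumed path layout */
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("login accepts: %s", buf);
	fclose(f);
	return 0;
}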
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 0000000..e01da9d
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific Task Management functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <asm/unaligned.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+u8 iscsit_tmr_abort_task(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *ref_cmd;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+
+	ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
+	if (!ref_cmd) {
+		pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
+			" %hu.\n", hdr->rtt, conn->cid);
+		return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
+			(hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
+			ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
+	}
+	if (ref_cmd->cmd_sn != hdr->refcmdsn) {
+		pr_err("RefCmdSN 0x%08x does not equal"
+			" task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
+			hdr->refcmdsn, ref_cmd->cmd_sn);
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
+	se_tmr->ref_task_tag		= hdr->rtt;
+	se_tmr->ref_cmd			= &ref_cmd->se_cmd;
+	tmr_req->ref_cmd_sn		= hdr->refcmdsn;
+	tmr_req->exp_data_sn		= hdr->exp_datasn;
+
+	return ISCSI_TMF_RSP_COMPLETE;
+}
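
When the referenced task cannot be found, the function above falls back to a window test on RefCmdSN: if it lies within [ExpCmdSN, MaxCmdSN] the command plausibly never arrived and the TMR can complete, otherwise no such task exists. The following standalone sketch restates that decision, assuming plain unsigned comparisons as written above (real CmdSN arithmetic would also need wrap-around handling); the names are local to the sketch.

/* Standalone sketch of the RefCmdSN window decision above. */
#include <stdbool.h>
#include <stdio.h>

enum tmf_rsp { TMF_RSP_COMPLETE, TMF_RSP_NO_TASK };

static enum tmf_rsp abort_unknown_task(unsigned int refcmdsn,
				       unsigned int exp_cmd_sn,
				       unsigned int max_cmd_sn)
{
	bool in_window = refcmdsn >= exp_cmd_sn && refcmdsn <= max_cmd_sn;

	return in_window ? TMF_RSP_COMPLETE : TMF_RSP_NO_TASK;
}

int main(void)
{
	printf("%d\n", abort_unknown_task(10, 8, 12));	/* 0: COMPLETE */
	printf("%d\n", abort_unknown_task(20, 8, 12));	/* 1: NO_TASK  */
	return 0;
}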
+
+/*
+ *	Called from iscsit_handle_task_mgt_cmd().
+ */
+int iscsit_tmr_task_warm_reset(
+	struct iscsi_conn *conn,
+	struct iscsi_tmr_req *tmr_req,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+#if 0
+	struct iscsi_init_task_mgt_cmnd *hdr =
+		(struct iscsi_init_task_mgt_cmnd *) buf;
+#endif
+	if (!na->tmr_warm_reset) {
+		pr_err("TMR Opcode TARGET_WARM_RESET authorization"
+			" failed for Initiator Node: %s\n",
+			sess->se_sess->se_node_acl->initiatorname);
+		 return -1;
+	}
+	/*
+	 * Do the real work in transport_generic_do_tmr().
+	 */
+	return 0;
+}
+
+int iscsit_tmr_task_cold_reset(
+	struct iscsi_conn *conn,
+	struct iscsi_tmr_req *tmr_req,
+	unsigned char *buf)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	if (!na->tmr_cold_reset) {
+		pr_err("TMR Opcode TARGET_COLD_RESET authorization"
+			" failed for Initiator Node: %s\n",
+			sess->se_sess->se_node_acl->initiatorname);
+		return -1;
+	}
+	/*
+	 * Do the real work in transport_generic_do_tmr().
+	 */
+	return 0;
+}
+
+u8 iscsit_tmr_task_reassign(
+	struct iscsi_cmd *cmd,
+	unsigned char *buf)
+{
+	struct iscsi_cmd *ref_cmd = NULL;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_conn_recovery *cr = NULL;
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+	int ret;
+
+	pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
+		" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
+		hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
+
+	if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
+		pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
+				" ignoring request.\n");
+		return ISCSI_TMF_RSP_NOT_SUPPORTED;
+	}
+
+	ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
+	if (ret == -2) {
+		pr_err("Command ITT: 0x%08x is still allegiant to CID:"
+			" %hu\n", ref_cmd->init_task_tag, cr->cid);
+		return ISCSI_TMF_RSP_TASK_ALLEGIANT;
+	} else if (ret == -1) {
+		pr_err("Unable to locate RefTaskTag: 0x%08x in"
+			" connection recovery command list.\n", hdr->rtt);
+		return ISCSI_TMF_RSP_NO_TASK;
+	}
+	/*
+	 * Temporary check to prevent connection recovery for
+	 * connections with a differing MaxRecvDataSegmentLength.
+	 */
+	if (cr->maxrecvdatasegmentlength !=
+	    conn->conn_ops->MaxRecvDataSegmentLength) {
+		pr_err("Unable to perform connection recovery for"
+			" differing MaxRecvDataSegmentLength, rejecting"
+			" TMR TASK_REASSIGN.\n");
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
+	se_tmr->ref_task_tag		= hdr->rtt;
+	se_tmr->ref_cmd			= &ref_cmd->se_cmd;
+	se_tmr->ref_task_lun		= get_unaligned_le64(&hdr->lun);
+	tmr_req->ref_cmd_sn		= hdr->refcmdsn;
+	tmr_req->exp_data_sn		= hdr->exp_datasn;
+	tmr_req->conn_recovery		= cr;
+	tmr_req->task_reassign		= 1;
+	/*
+	 * Command can now be reassigned to a new connection.
+	 * The task management response must be sent before the
+	 * reassignment actually happens.  See iscsi_tmr_post_handler().
+	 */
+	return ISCSI_TMF_RSP_COMPLETE;
+}
+
+static void iscsit_task_reassign_remove_cmd(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn_recovery *cr,
+	struct iscsi_session *sess)
+{
+	int ret;
+
+	spin_lock(&cr->conn_recovery_cmd_lock);
+	ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+	spin_unlock(&cr->conn_recovery_cmd_lock);
+	if (!ret) {
+		pr_debug("iSCSI connection recovery successful for CID:"
+			" %hu on SID: %u\n", cr->cid, sess->sid);
+		iscsit_remove_active_connection_recovery_entry(cr, sess);
+	}
+}
+
+static int iscsit_task_reassign_complete_nop_out(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+	struct se_cmd *se_cmd = se_tmr->ref_cmd;
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		return -1;
+	}
+	cr = cmd->cr;
+
+	/*
+	 * Reset the StatSN so a new one for this command's new connection
+	 * will be assigned.
+	 * Reset the ExpStatSN as well so we may receive Status SNACKs.
+	 */
+	cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	cmd->i_state = ISTATE_SEND_NOPIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_write(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	int no_build_r2ts = 0;
+	u32 length = 0, offset = 0;
+	struct iscsi_conn *conn = cmd->conn;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	/*
+	 * The Initiator must not send an R2T SNACK with a BegRun less than
+	 * the TMR TASK_REASSIGN's ExpDataSN.
+	 */
+	if (!tmr_req->exp_data_sn) {
+		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = 0;
+	} else {
+		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+	}
+
+	/*
+	 * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
+	 * Initiator is expecting.  The Target controls all WRITE operations
+	 * so if we have received all DataOUT we can safely ignore the Initiator.
+	 */
+	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+		if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
+			pr_debug("WRITE ITT: 0x%08x: t_state: %d"
+				" never sent to transport\n",
+				cmd->init_task_tag, cmd->se_cmd.t_state);
+			return transport_generic_handle_data(se_cmd);
+		}
+
+		cmd->i_state = ISTATE_SEND_STATUS;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	/*
+	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
+	 * Unsolicited DataOut.
+	 */
+	if (cmd->unsolicited_data) {
+		cmd->unsolicited_data = 0;
+
+		offset = cmd->next_burst_len = cmd->write_data_done;
+
+		if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
+		     cmd->data_length) {
+			no_build_r2ts = 1;
+			length = (cmd->data_length - offset);
+		} else
+			length = (conn->sess->sess_ops->FirstBurstLength - offset);
+
+		spin_lock_bh(&cmd->r2t_lock);
+		if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+		cmd->outstanding_r2ts++;
+		spin_unlock_bh(&cmd->r2t_lock);
+
+		if (no_build_r2ts)
+			return 0;
+	}
+	/*
+	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
+	 */
+	return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+}
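
The unsolicited-data branch above decides, from the amount of DataOUT already received and the negotiated FirstBurstLength, how long the recovery R2T should be and whether anything beyond it is needed at all. The standalone sketch below mirrors that arithmetic with illustrative values; the struct and function names are local to the sketch.

/* Standalone sketch of the unsolicited-data arithmetic above.  Given how much
 * DataOUT has already been received (the new R2T offset) and the negotiated
 * FirstBurstLength, compute the recovery R2T length and whether further R2Ts
 * are needed.  Values and names are illustrative only. */
#include <stdio.h>

struct r2t_plan {
	unsigned int offset;	/* where the recovery R2T starts       */
	unsigned int length;	/* how much it asks for                */
	int no_build_r2ts;	/* 1: no further R2Ts need to be built */
};

static struct r2t_plan plan_unsolicited_recovery(unsigned int write_data_done,
						 unsigned int data_length,
						 unsigned int first_burst_len)
{
	struct r2t_plan p = { .offset = write_data_done };

	/* Mirrors the FirstBurstLength comparison in the function above. */
	if (first_burst_len - p.offset >= data_length) {
		p.no_build_r2ts = 1;
		p.length = data_length - p.offset;
	} else {
		p.length = first_burst_len - p.offset;
	}
	return p;
}

int main(void)
{
	/* Remaining data exceeds the first burst: one recovery R2T, then
	 * the equivalent of iscsit_build_r2ts_for_cmd() builds the rest. */
	struct r2t_plan a = plan_unsolicited_recovery(4096, 65536, 8192);
	/* Whole command fits in the first burst: no further R2Ts needed. */
	struct r2t_plan b = plan_unsolicited_recovery(4096, 8192, 65536);

	printf("a: offset=%u length=%u no_build=%d\n",
	       a.offset, a.length, a.no_build_r2ts);
	printf("b: offset=%u length=%u no_build=%d\n",
	       b.offset, b.length, b.no_build_r2ts);
	return 0;
}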
+
+static int iscsit_task_reassign_complete_read(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_datain_req *dr;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	/*
+	 * The Initiator must not send a Data SNACK with a BegRun less than
+	 * the TMR TASK_REASSIGN's ExpDataSN.
+	 */
+	if (!tmr_req->exp_data_sn) {
+		cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = 0;
+	} else {
+		cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+	}
+
+	if (!(cmd->se_cmd.transport_state & CMD_T_SENT)) {
+		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
+			" transport\n", cmd->init_task_tag,
+			cmd->se_cmd.t_state);
+		transport_handle_cdb_direct(se_cmd);
+		return 0;
+	}
+
+	if (!(se_cmd->transport_state & CMD_T_COMPLETE)) {
+		pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
+			" from transport\n", cmd->init_task_tag,
+			cmd->se_cmd.t_state);
+		return -1;
+	}
+
+	dr = iscsit_allocate_datain_req();
+	if (!dr)
+		return -1;
+	/*
+	 * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
+	 * Initiator is expecting.
+	 */
+	dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+	dr->runlength = 0;
+	dr->generate_recovery_values = 1;
+	dr->recovery = DATAIN_CONNECTION_RECOVERY;
+
+	iscsit_attach_datain_req(cmd, dr);
+
+	cmd->i_state = ISTATE_SEND_DATAIN;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+	struct iscsi_cmd *cmd,
+	struct iscsi_tmr_req *tmr_req)
+{
+	struct iscsi_conn *conn = cmd->conn;
+
+	cmd->i_state = ISTATE_SEND_STATUS;
+	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+	return 0;
+}
+
+static int iscsit_task_reassign_complete_scsi_cmnd(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+	struct se_cmd *se_cmd = se_tmr->ref_cmd;
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn_recovery *cr;
+
+	if (!cmd->cr) {
+		pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+			" is NULL!\n", cmd->init_task_tag);
+		return -1;
+	}
+	cr = cmd->cr;
+
+	/*
+	 * Reset the StatSN so a new one for this command's new connection
+	 * will be assigned.
+	 * Reset the ExpStatSN as well so we may receive Status SNACKs.
+	 */
+	cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+	iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		cmd->i_state = ISTATE_SEND_STATUS;
+		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+		return 0;
+	}
+
+	switch (cmd->data_direction) {
+	case DMA_TO_DEVICE:
+		return iscsit_task_reassign_complete_write(cmd, tmr_req);
+	case DMA_FROM_DEVICE:
+		return iscsit_task_reassign_complete_read(cmd, tmr_req);
+	case DMA_NONE:
+		return iscsit_task_reassign_complete_none(cmd, tmr_req);
+	default:
+		pr_err("Unknown cmd->data_direction: 0x%02x\n",
+				cmd->data_direction);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int iscsit_task_reassign_complete(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+	struct se_cmd *se_cmd;
+	struct iscsi_cmd *cmd;
+	int ret = 0;
+
+	if (!se_tmr->ref_cmd) {
+		pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+		return -1;
+	}
+	se_cmd = se_tmr->ref_cmd;
+	cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	cmd->conn = conn;
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_NOOP_OUT:
+		ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+		break;
+	case ISCSI_OP_SCSI_CMD:
+		ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+		break;
+	default:
+		pr_err("Illegal iSCSI Opcode 0x%02x during"
+			" command reallegiance\n", cmd->iscsi_opcode);
+		return -1;
+	}
+
+	if (ret != 0)
+		return ret;
+
+	pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
+		" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+			cmd->init_task_tag, conn->cid);
+
+	return 0;
+}
+
+/*
+ *	Handles special after-the-fact actions related to TMRs.
+ *	Right now the only one it is really needed for is
+ *	connection recovery related TASK_REASSIGN.
+ */
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+	if (tmr_req->task_reassign &&
+	   (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+		return iscsit_task_reassign_complete(tmr_req, conn);
+
+	return 0;
+}
+
+/*
+ *	Nothing to do here, but leave it for good measure. :-)
+ */
+int iscsit_task_reassign_prepare_read(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	return 0;
+}
+
+static void iscsit_task_reassign_prepare_unsolicited_dataout(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	int i, j;
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_seq *seq = NULL;
+
+	if (conn->sess->sess_ops->DataSequenceInOrder) {
+		cmd->data_sn = 0;
+
+		if (cmd->immediate_data)
+			cmd->r2t_offset += (cmd->first_burst_len -
+				cmd->seq_start_offset);
+
+		if (conn->sess->sess_ops->DataPDUInOrder) {
+			cmd->write_data_done -= (cmd->immediate_data) ?
+						(cmd->first_burst_len -
+						 cmd->seq_start_offset) :
+						 cmd->first_burst_len;
+			cmd->first_burst_len = 0;
+			return;
+		}
+
+		for (i = 0; i < cmd->pdu_count; i++) {
+			pdu = &cmd->pdu_list[i];
+
+			if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+				continue;
+
+			if ((pdu->offset >= cmd->seq_start_offset) &&
+			   ((pdu->offset + pdu->length) <=
+			     cmd->seq_end_offset)) {
+				cmd->first_burst_len -= pdu->length;
+				cmd->write_data_done -= pdu->length;
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	} else {
+		for (i = 0; i < cmd->seq_count; i++) {
+			seq = &cmd->seq_list[i];
+
+			if (seq->type != SEQTYPE_UNSOLICITED)
+				continue;
+
+			cmd->write_data_done -=
+					(seq->offset - seq->orig_offset);
+			cmd->first_burst_len = 0;
+			seq->data_sn = 0;
+			seq->offset = seq->orig_offset;
+			seq->next_burst_len = 0;
+			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+			if (conn->sess->sess_ops->DataPDUInOrder)
+				continue;
+
+			for (j = 0; j < seq->pdu_count; j++) {
+				pdu = &cmd->pdu_list[j+seq->pdu_start];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+	}
+}
+
+int iscsit_task_reassign_prepare_write(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+	struct se_cmd *se_cmd = se_tmr->ref_cmd;
+	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_pdu *pdu = NULL;
+	struct iscsi_r2t *r2t = NULL, *r2t_tmp;
+	int first_incomplete_r2t = 1, i = 0;
+
+	/*
+	 * The command was in the process of receiving Unsolicited DataOUT when
+	 * the connection failed.
+	 */
+	if (cmd->unsolicited_data)
+		iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
+
+	/*
+	 * The Initiator is requesting R2Ts starting from zero, so skip
+	 * checking acknowledged R2Ts and start checking struct iscsi_r2t
+	 * entries greater than zero.
+	 */
+	if (!tmr_req->exp_data_sn)
+		goto drop_unacknowledged_r2ts;
+
+	/*
+	 * We now check that the PDUs in DataOUT sequences below
+	 * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
+	 * expecting next) have all the DataOUT they require to complete
+	 * the DataOUT sequence.  First scan from R2TSN 0 to TMR
+	 * TASK_REASSIGN ExpDataSN-1.
+	 *
+	 * If we have not received all DataOUT in question, we must
+	 * make sure to make the appropriate changes to values in
+	 * struct iscsi_cmd (and elsewhere depending on session parameters)
+	 * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
+	 * will resend a new R2T for the DataOUT sequences in question.
+	 */
+	spin_lock_bh(&cmd->r2t_lock);
+	if (list_empty(&cmd->cmd_r2t_list)) {
+		spin_unlock_bh(&cmd->r2t_lock);
+		return -1;
+	}
+
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+
+		if (r2t->r2t_sn >= tmr_req->exp_data_sn)
+			continue;
+		/*
+		 * Safely ignore Recovery R2Ts and R2Ts that have completed
+		 * DataOUT sequences.
+		 */
+		if (r2t->seq_complete)
+			continue;
+
+		if (r2t->recovery_r2t)
+			continue;
+
+		/*
+		 *                 DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, we must take into consideration
+		 * the following:
+		 *
+		 *                  DataSequenceInOrder=No:
+		 *
+		 * Taking into account that the Initiator controls the (possibly
+		 * random) PDU Order in (possibly random) Sequence Order of
+		 * DataOUT the target requests with R2Ts, we must take into
+		 * consideration the following:
+		 *
+		 *      DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request only the total sequence length
+		 * minus current received offset.  This is because we must
+		 * assume the initiator will continue sending DataOUT from the
+		 * last PDU before the connection failed.
+		 *
+		 *      DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+		 *
+		 * While processing non-complete R2T DataOUT sequence requests
+		 * the Target will re-request the entire DataOUT sequence if
+		 * any single PDU is missing from the sequence.  This is because
+		 * we have no logical method to determine the next PDU offset,
+		 * and we must assume the Initiator will be sending any random
+		 * PDU offset in the current sequence after TASK_REASSIGN
+		 * has completed.
+		 */
+		if (conn->sess->sess_ops->DataSequenceInOrder) {
+			if (!first_incomplete_r2t) {
+				cmd->r2t_offset -= r2t->xfer_len;
+				goto next;
+			}
+
+			if (conn->sess->sess_ops->DataPDUInOrder) {
+				cmd->data_sn = 0;
+				cmd->r2t_offset -= (r2t->xfer_len -
+					cmd->next_burst_len);
+				first_incomplete_r2t = 0;
+				goto next;
+			}
+
+			cmd->data_sn = 0;
+			cmd->r2t_offset -= r2t->xfer_len;
+
+			for (i = 0; i < cmd->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				if ((pdu->offset >= r2t->offset) &&
+				    (pdu->offset < (r2t->offset +
+						r2t->xfer_len))) {
+					cmd->next_burst_len -= pdu->length;
+					cmd->write_data_done -= pdu->length;
+					pdu->status = ISCSI_PDU_NOT_RECEIVED;
+				}
+			}
+
+			first_incomplete_r2t = 0;
+		} else {
+			struct iscsi_seq *seq;
+
+			seq = iscsit_get_seq_holder(cmd, r2t->offset,
+					r2t->xfer_len);
+			if (!seq) {
+				spin_unlock_bh(&cmd->r2t_lock);
+				return -1;
+			}
+
+			cmd->write_data_done -=
+					(seq->offset - seq->orig_offset);
+			seq->data_sn = 0;
+			seq->offset = seq->orig_offset;
+			seq->next_burst_len = 0;
+			seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+			cmd->seq_send_order--;
+
+			if (conn->sess->sess_ops->DataPDUInOrder)
+				goto next;
+
+			for (i = 0; i < seq->pdu_count; i++) {
+				pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+				if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+					continue;
+
+				pdu->status = ISCSI_PDU_NOT_RECEIVED;
+			}
+		}
+
+next:
+		cmd->outstanding_r2ts--;
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	/*
+	 * We now drop all unacknowledged R2Ts, i.e. ExpDataSN from TMR
+	 * TASK_REASSIGN to the last R2T in the list.  We are also careful
+	 * to check that the Initiator is not requesting R2Ts for DataOUT
+	 * sequences it has already completed.
+	 *
+	 * Free each R2T in question and adjust values in struct iscsi_cmd
+	 * accordingly so iscsit_build_r2ts_for_cmd() can do the rest of
+	 * the work after the TMR TASK_REASSIGN Response is sent.
+	 */
+drop_unacknowledged_r2ts:
+
+	cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+	cmd->r2t_sn = tmr_req->exp_data_sn;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+		/*
+		 * Skip up to the R2T Sequence number provided by the
+		 * iSCSI TASK_REASSIGN TMR
+		 */
+		if (r2t->r2t_sn < tmr_req->exp_data_sn)
+			continue;
+
+		if (r2t->seq_complete) {
+			pr_err("Initiator is requesting R2Ts from"
+				" R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+				" Length: %u is already complete."
+				"   BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+				tmr_req->exp_data_sn, r2t->r2t_sn,
+				r2t->offset, r2t->xfer_len);
+			spin_unlock_bh(&cmd->r2t_lock);
+			return -1;
+		}
+
+		if (r2t->recovery_r2t) {
+			iscsit_free_r2t(r2t, cmd);
+			continue;
+		}
+
+		/*		   DataSequenceInOrder=Yes:
+		 *
+		 * Taking into account the iSCSI implementation requirement of
+		 * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
+		 * entire transfer length from the command's R2T offset marker.
+		 *
+		 *		   DataSequenceInOrder=No:
+		 *
+		 * We subtract the difference from struct iscsi_seq between the
+		 * current offset and original offset from cmd->write_data_done
+		 * to account for DataOUT PDUs already received.  Then reset
+		 * the current offset to the original and zero out the current
+		 * burst length,  to make sure we re-request the entire DataOUT
+		 * sequence.
+		 */
+		if (conn->sess->sess_ops->DataSequenceInOrder)
+			cmd->r2t_offset -= r2t->xfer_len;
+		else
+			cmd->seq_send_order--;
+
+		cmd->outstanding_r2ts--;
+		iscsit_free_r2t(r2t, cmd);
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return 0;
+}
+
+/*
+ *	Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for
+ *	a given struct iscsi_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+	struct iscsi_tmr_req *tmr_req,
+	struct iscsi_conn *conn)
+{
+	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+	struct se_cmd *se_cmd = se_tmr->ref_cmd;
+	struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+		return 0;
+
+	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+		return 0;
+
+	if (ref_cmd->data_direction == DMA_NONE)
+		return 0;
+
+	/*
+	 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
+	 * of DataIN the Initiator is expecting.
+	 *
+	 * Also check that the Initiator is not re-requesting DataIN that has
+	 * already been acknowledged with a DataAck SNACK.
+	 */
+	if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+		if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN greater than command's"
+				" DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+				ref_cmd->data_sn);
+			return -1;
+		}
+		if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+		    (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+			pr_err("Received ExpDataSN: 0x%08x for READ"
+				" in TMR TASK_REASSIGN for previously"
+				" acknowledged DataIN: 0x%08x,"
+				" protocol error\n", tmr_req->exp_data_sn,
+				ref_cmd->acked_data_sn);
+			return -1;
+		}
+		return iscsit_task_reassign_prepare_read(tmr_req, conn);
+	}
+
+	/*
+	 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
+	 * for R2Ts the Initiator is expecting.
+	 *
+	 * Do the magic in iscsit_task_reassign_prepare_write().
+	 */
+	if (ref_cmd->data_direction == DMA_TO_DEVICE) {
+		if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
+			pr_err("Received ExpDataSN: 0x%08x for WRITE"
+				" in TMR TASK_REASSIGN greater than command's"
+				" R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
+					ref_cmd->r2t_sn);
+			return -1;
+		}
+		return iscsit_task_reassign_prepare_write(tmr_req, conn);
+	}
+
+	pr_err("Unknown iSCSI data_direction: 0x%02x\n",
+			ref_cmd->data_direction);
+
+	return -1;
+}
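
Condensed into a single predicate, the sanity checks above amount to: for WRITEs, ExpDataSN must not exceed the command's next R2TSN; for READs, it must not exceed the command's next DataSN and must not point back into DataIN already covered by a DataAck SNACK. The standalone restatement below uses field names local to the sketch, not the kernel structures.

/* Standalone restatement of iscsit_check_task_reassign_expdatasn() above. */
#include <stdbool.h>
#include <stdio.h>

struct ref_cmd_state {
	unsigned int data_sn;	/* next DataSN for a READ  */
	unsigned int r2t_sn;	/* next R2TSN for a WRITE  */
	unsigned int acked_data_sn;
	bool got_datack_snack;
	bool is_write;
};

static bool expdatasn_is_sane(unsigned int exp_data_sn,
			      const struct ref_cmd_state *c)
{
	if (c->is_write)
		return exp_data_sn <= c->r2t_sn;

	if (exp_data_sn > c->data_sn)
		return false;
	if (c->got_datack_snack && exp_data_sn <= c->acked_data_sn)
		return false;
	return true;
}

int main(void)
{
	struct ref_cmd_state rd = { .data_sn = 5, .acked_data_sn = 2,
				    .got_datack_snack = true };

	printf("%d\n", expdatasn_is_sane(3, &rd));	/* 1: ok             */
	printf("%d\n", expdatasn_is_sane(2, &rd));	/* 0: already acked  */
	return 0;
}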
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 0000000..142e992
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_TMR_H
+#define ISCSI_TARGET_TMR_H
+
+extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+			unsigned char *);
+extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+			unsigned char *);
+extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
+			struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_TMR_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 0000000..c3d7bf5
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,769 @@
+/*******************************************************************************
+ * This file contains iSCSI Target Portal Group related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
+{
+	struct iscsi_portal_group *tpg;
+
+	tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
+	if (!tpg) {
+		pr_err("Unable to allocate struct iscsi_portal_group\n");
+		return NULL;
+	}
+
+	tpg->tpgt = tpgt;
+	tpg->tpg_state = TPG_STATE_FREE;
+	tpg->tpg_tiqn = tiqn;
+	INIT_LIST_HEAD(&tpg->tpg_gnp_list);
+	INIT_LIST_HEAD(&tpg->tpg_list);
+	mutex_init(&tpg->tpg_access_lock);
+	mutex_init(&tpg->np_login_lock);
+	spin_lock_init(&tpg->tpg_state_lock);
+	spin_lock_init(&tpg->tpg_np_lock);
+
+	return tpg;
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
+
+int iscsit_load_discovery_tpg(void)
+{
+	struct iscsi_param *param;
+	struct iscsi_portal_group *tpg;
+	int ret;
+
+	tpg = iscsit_alloc_portal_group(NULL, 1);
+	if (!tpg) {
+		pr_err("Unable to allocate struct iscsi_portal_group\n");
+		return -1;
+	}
+
+	ret = core_tpg_register(
+			&lio_target_fabric_configfs->tf_ops,
+			NULL, &tpg->tpg_se_tpg, tpg,
+			TRANSPORT_TPG_TYPE_DISCOVERY);
+	if (ret < 0) {
+		kfree(tpg);
+		return -1;
+	}
+
+	tpg->sid = 1; /* First Assigned LIO Session ID */
+	iscsit_set_default_tpg_attribs(tpg);
+
+	if (iscsi_create_default_params(&tpg->param_list) < 0)
+		goto out;
+	/*
+	 * By default we disable authentication for discovery sessions,
+	 * this can be changed with:
+	 *
+	 * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+	 */
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param)
+		goto out;
+
+	if (iscsi_update_param_value(param, "CHAP,None") < 0)
+		goto out;
+
+	tpg->tpg_attrib.authentication = 0;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state  = TPG_STATE_ACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	iscsit_global->discovery_tpg = tpg;
+	pr_debug("CORE[0] - Allocated Discovery TPG\n");
+
+	return 0;
+out:
+	if (tpg->sid == 1)
+		core_tpg_deregister(&tpg->tpg_se_tpg);
+	kfree(tpg);
+	return -1;
+}
+
+void iscsit_release_discovery_tpg(void)
+{
+	struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
+
+	if (!tpg)
+		return;
+
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+
+	kfree(tpg);
+	iscsit_global->discovery_tpg = NULL;
+}
+
+struct iscsi_portal_group *iscsit_get_tpg_from_np(
+	struct iscsi_tiqn *tiqn,
+	struct iscsi_np *np)
+{
+	struct iscsi_portal_group *tpg = NULL;
+	struct iscsi_tpg_np *tpg_np;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+		spin_lock(&tpg->tpg_state_lock);
+		if (tpg->tpg_state == TPG_STATE_FREE) {
+			spin_unlock(&tpg->tpg_state_lock);
+			continue;
+		}
+		spin_unlock(&tpg->tpg_state_lock);
+
+		spin_lock(&tpg->tpg_np_lock);
+		list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+			if (tpg_np->tpg_np == np) {
+				spin_unlock(&tpg->tpg_np_lock);
+				spin_unlock(&tiqn->tiqn_tpg_lock);
+				return tpg;
+			}
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return NULL;
+}
+
+int iscsit_get_tpg(
+	struct iscsi_portal_group *tpg)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
+	return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+}
+
+void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+{
+	mutex_unlock(&tpg->tpg_access_lock);
+}
+
+static void iscsit_clear_tpg_np_login_thread(
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg)
+{
+	if (!tpg_np->tpg_np) {
+		pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+		return;
+	}
+
+	iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+}
+
+void iscsit_clear_tpg_np_login_threads(
+	struct iscsi_portal_group *tpg)
+{
+	struct iscsi_tpg_np *tpg_np;
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+		if (!tpg_np->tpg_np) {
+			pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+			continue;
+		}
+		spin_unlock(&tpg->tpg_np_lock);
+		iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+		spin_lock(&tpg->tpg_np_lock);
+	}
+	spin_unlock(&tpg->tpg_np_lock);
+}
+
+void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
+{
+	iscsi_print_params(tpg->param_list);
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	a->authentication = TA_AUTHENTICATION;
+	a->login_timeout = TA_LOGIN_TIMEOUT;
+	a->netif_timeout = TA_NETIF_TIMEOUT;
+	a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+	a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+	a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+	a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+	a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+}
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+	if (tpg->tpg_state != TPG_STATE_FREE) {
+		pr_err("Unable to add iSCSI Target Portal Group: %d"
+			" while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+		return -EEXIST;
+	}
+	iscsit_set_default_tpg_attribs(tpg);
+
+	if (iscsi_create_default_params(&tpg->param_list) < 0)
+		goto err_out;
+
+	ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state	= TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+	tiqn->tiqn_ntpgs++;
+	pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+err_out:
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+	kfree(tpg);
+	return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+	struct iscsi_tiqn *tiqn,
+	struct iscsi_portal_group *tpg,
+	int force)
+{
+	u8 old_state = tpg->tpg_state;
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+		pr_err("Unable to delete iSCSI Target Portal Group:"
+			" %hu while active sessions exist, and force=0\n",
+			tpg->tpgt);
+		tpg->tpg_state = old_state;
+		return -EPERM;
+	}
+
+	core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
+
+	if (tpg->param_list) {
+		iscsi_release_param_list(tpg->param_list);
+		tpg->param_list = NULL;
+	}
+
+	core_tpg_deregister(&tpg->tpg_se_tpg);
+
+	spin_lock(&tpg->tpg_state_lock);
+	tpg->tpg_state = TPG_STATE_FREE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_ntpgs--;
+	list_del(&tpg->tpg_list);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+			tiqn->tiqn, tpg->tpgt);
+
+	kfree(tpg);
+	return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+	struct iscsi_param *param;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	spin_lock(&tpg->tpg_state_lock);
+	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+		pr_err("iSCSI target portal group: %hu is already"
+			" active, ignoring request.\n", tpg->tpgt);
+		spin_unlock(&tpg->tpg_state_lock);
+		return -EINVAL;
+	}
+	/*
+	 * Make sure that AuthMethod does not contain None as an option
+	 * unless explicitly disabled.  Set the default to CHAP if authentication
+	 * is enforced (as per default), and remove the NONE option.
+	 */
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param) {
+		spin_unlock(&tpg->tpg_state_lock);
+		return -ENOMEM;
+	}
+
+	if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+		if (!strcmp(param->value, NONE))
+			if (iscsi_update_param_value(param, CHAP) < 0) {
+				spin_unlock(&tpg->tpg_state_lock);
+				return -ENOMEM;
+			}
+		if (iscsit_ta_authentication(tpg, 1) < 0) {
+			spin_unlock(&tpg->tpg_state_lock);
+			return -ENOMEM;
+		}
+	}
+
+	tpg->tpg_state = TPG_STATE_ACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_active_tpgs++;
+	pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
+			tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+}
+
+int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
+{
+	struct iscsi_tiqn *tiqn;
+	u8 old_state = tpg->tpg_state;
+
+	spin_lock(&tpg->tpg_state_lock);
+	if (tpg->tpg_state == TPG_STATE_INACTIVE) {
+		pr_err("iSCSI Target Portal Group: %hu is already"
+			" inactive, ignoring request.\n", tpg->tpgt);
+		spin_unlock(&tpg->tpg_state_lock);
+		return -EINVAL;
+	}
+	tpg->tpg_state = TPG_STATE_INACTIVE;
+	spin_unlock(&tpg->tpg_state_lock);
+
+	iscsit_clear_tpg_np_login_threads(tpg);
+
+	if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+		spin_lock(&tpg->tpg_state_lock);
+		tpg->tpg_state = old_state;
+		spin_unlock(&tpg->tpg_state_lock);
+		pr_err("Unable to disable iSCSI Target Portal Group:"
+			" %hu while active sessions exist, and force=0\n",
+			tpg->tpgt);
+		return -EPERM;
+	}
+
+	tiqn = tpg->tpg_tiqn;
+	if (!tiqn || (tpg == iscsit_global->discovery_tpg))
+		return 0;
+
+	spin_lock(&tiqn->tiqn_tpg_lock);
+	tiqn->tiqn_active_tpgs--;
+	pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
+			tpg->tpgt);
+	spin_unlock(&tiqn->tiqn_tpg_lock);
+
+	return 0;
+}
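
Taken together, the add/enable/disable/delete paths above move a TPG through a small lifecycle: FREE on allocation, INACTIVE once added to the tiqn, ACTIVE after enable, back to INACTIVE on disable, and FREE again just before deletion. The sketch below is a simplified view of that intended lifecycle, not the exact locking or error checks the kernel performs, and the names are local to the sketch.

/* Simplified sketch of the TPG lifecycle implied by the functions above. */
#include <stdio.h>

enum tpg_state { TPG_FREE, TPG_INACTIVE, TPG_ACTIVE };

/* Returns 0 and updates *state on an expected transition, -1 otherwise. */
static int tpg_transition(enum tpg_state *state, enum tpg_state next)
{
	int ok = 0;

	switch (next) {
	case TPG_INACTIVE:	/* add or disable */
		ok = (*state == TPG_FREE || *state == TPG_ACTIVE);
		break;
	case TPG_ACTIVE:	/* enable */
		ok = (*state == TPG_INACTIVE);
		break;
	case TPG_FREE:		/* delete */
		ok = (*state == TPG_INACTIVE);
		break;
	}
	if (!ok)
		return -1;
	*state = next;
	return 0;
}

int main(void)
{
	enum tpg_state s = TPG_FREE;

	tpg_transition(&s, TPG_INACTIVE);	/* iscsit_tpg_add_portal_group()     */
	tpg_transition(&s, TPG_ACTIVE);		/* iscsit_tpg_enable_portal_group()  */
	printf("active: %d\n", s == TPG_ACTIVE);
	tpg_transition(&s, TPG_INACTIVE);	/* iscsit_tpg_disable_portal_group() */
	tpg_transition(&s, TPG_FREE);		/* iscsit_tpg_del_portal_group()     */
	printf("state: %d\n", s);		/* 0: TPG_FREE */
	return 0;
}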
+
+struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
+	struct iscsi_session *sess)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct se_node_acl *se_nacl = se_sess->se_node_acl;
+	struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
+					se_node_acl);
+
+	return &acl->node_attrib;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
+	struct iscsi_tpg_np *tpg_np,
+	int network_transport)
+{
+	struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+
+	spin_lock(&tpg_np->tpg_np_parent_lock);
+	list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+			&tpg_np->tpg_np_parent_list, tpg_np_child_list) {
+		if (tpg_np_child->tpg_np->np_network_transport ==
+				network_transport) {
+			spin_unlock(&tpg_np->tpg_np_parent_lock);
+			return tpg_np_child;
+		}
+	}
+	spin_unlock(&tpg_np->tpg_np_parent_lock);
+
+	return NULL;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+	struct iscsi_portal_group *tpg,
+	struct __kernel_sockaddr_storage *sockaddr,
+	char *ip_str,
+	struct iscsi_tpg_np *tpg_np_parent,
+	int network_transport)
+{
+	struct iscsi_np *np;
+	struct iscsi_tpg_np *tpg_np;
+
+	tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
+	if (!tpg_np) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_tpg_np.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	np = iscsit_add_np(sockaddr, ip_str, network_transport);
+	if (IS_ERR(np)) {
+		kfree(tpg_np);
+		return ERR_CAST(np);
+	}
+
+	INIT_LIST_HEAD(&tpg_np->tpg_np_list);
+	INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
+	INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
+	spin_lock_init(&tpg_np->tpg_np_parent_lock);
+	tpg_np->tpg_np		= np;
+	tpg_np->tpg		= tpg;
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
+	tpg->num_tpg_nps++;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_num_tpg_nps++;
+	spin_unlock(&tpg->tpg_np_lock);
+
+	if (tpg_np_parent) {
+		tpg_np->tpg_np_parent = tpg_np_parent;
+		spin_lock(&tpg_np_parent->tpg_np_parent_lock);
+		list_add_tail(&tpg_np->tpg_np_child_list,
+			&tpg_np_parent->tpg_np_parent_list);
+		spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+	}
+
+	pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+		(np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+	return tpg_np;
+}
+
+static int iscsit_tpg_release_np(
+	struct iscsi_tpg_np *tpg_np,
+	struct iscsi_portal_group *tpg,
+	struct iscsi_np *np)
+{
+	iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+
+	pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
+		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+		(np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+	tpg_np->tpg_np = NULL;
+	tpg_np->tpg = NULL;
+	kfree(tpg_np);
+	/*
+	 * iscsit_del_np() will shutdown struct iscsi_np when last TPG reference is released.
+	 */
+	return iscsit_del_np(np);
+}
+
+int iscsit_tpg_del_network_portal(
+	struct iscsi_portal_group *tpg,
+	struct iscsi_tpg_np *tpg_np)
+{
+	struct iscsi_np *np;
+	struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+	int ret = 0;
+
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	if (!tpg_np->tpg_np_parent) {
+		/*
+		 * We are the parent tpg network portal.  Release all of the
+		 * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
+		 * list first.
+		 */
+		list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+				&tpg_np->tpg_np_parent_list,
+				tpg_np_child_list) {
+			ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
+			if (ret < 0)
+				pr_err("iscsit_tpg_del_network_portal()"
+					" failed: %d\n", ret);
+		}
+	} else {
+		/*
+		 * We are not the parent ISCSI_TCP tpg network portal.  Release
+		 * our own network portals from the child list.
+		 */
+		spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+		list_del(&tpg_np->tpg_np_child_list);
+		spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+	}
+
+	spin_lock(&tpg->tpg_np_lock);
+	list_del(&tpg_np->tpg_np_list);
+	tpg->num_tpg_nps--;
+	if (tpg->tpg_tiqn)
+		tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+	spin_unlock(&tpg->tpg_np_lock);
+
+	return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_tpg_set_initiator_node_queue_depth(
+	struct iscsi_portal_group *tpg,
+	unsigned char *initiatorname,
+	u32 queue_depth,
+	int force)
+{
+	return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
+		initiatorname, queue_depth, force);
+}
+
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+	unsigned char buf1[256], buf2[256], *none = NULL;
+	int len;
+	struct iscsi_param *param;
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((authentication != 1) && (authentication != 0)) {
+		pr_err("Illegal value for authentication parameter:"
+			" %u, ignoring request.\n", authentication);
+		return -1;
+	}
+
+	memset(buf1, 0, sizeof(buf1));
+	memset(buf2, 0, sizeof(buf2));
+
+	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+	if (!param)
+		return -EINVAL;
+
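+	/*
+	 * Enforcing authentication strips the "None" token (and any adjoining
+	 * comma) from the negotiated AuthMethod value; disabling it appends
+	 * ",None" so that unauthenticated logins are accepted again.
+	 */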
+	if (authentication) {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (!none)
+			goto out;
+		if (!strncmp(none + 4, ",", 1)) {
+			if (!strcmp(buf1, none))
+				sprintf(buf2, "%s", none+5);
+			else {
+				none--;
+				*none = '\0';
+				len = sprintf(buf2, "%s", buf1);
+				none += 5;
+				sprintf(buf2 + len, "%s", none);
+			}
+		} else {
+			none--;
+			*none = '\0';
+			sprintf(buf2, "%s", buf1);
+		}
+		if (iscsi_update_param_value(param, buf2) < 0)
+			return -EINVAL;
+	} else {
+		snprintf(buf1, sizeof(buf1), "%s", param->value);
+		none = strstr(buf1, NONE);
+		if (none)
+			goto out;
+		strncat(buf1, ",", strlen(","));
+		strncat(buf1, NONE, strlen(NONE));
+		if (iscsi_update_param_value(param, buf1) < 0)
+			return -EINVAL;
+	}
+
+out:
+	a->authentication = authentication;
+	pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+		a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_login_timeout(
+	struct iscsi_portal_group *tpg,
+	u32 login_timeout)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+		pr_err("Requested Login Timeout %u larger than maximum"
+			" %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+		pr_err("Requested Login Timeout %u smaller than"
+			" minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->login_timeout = login_timeout;
+	pr_debug("Set Login Timeout to %u for Target Portal Group"
+		" %hu\n", a->login_timeout, tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_netif_timeout(
+	struct iscsi_portal_group *tpg,
+	u32 netif_timeout)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
+		pr_err("Requested Network Interface Timeout %u larger"
+			" than maximum %u\n", netif_timeout,
+				TA_NETIF_TIMEOUT_MAX);
+		return -EINVAL;
+	} else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
+		pr_err("Requested Network Interface Timeout %u smaller"
+			" than minimum %u\n", netif_timeout,
+				TA_NETIF_TIMEOUT_MIN);
+		return -EINVAL;
+	}
+
+	a->netif_timeout = netif_timeout;
+	pr_debug("Set Network Interface Timeout to %u for"
+		" Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
+
+	return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->generate_node_acls = flag;
+	pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+		tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
+	if (flag == 1 && a->cache_dynamic_acls == 0) {
+		pr_debug("Explicitly setting cache_dynamic_acls=1 when "
+			"generate_node_acls=1\n");
+		a->cache_dynamic_acls = 1;
+	}
+
+	return 0;
+}
+
+int iscsit_ta_default_cmdsn_depth(
+	struct iscsi_portal_group *tpg,
+	u32 tcq_depth)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+		pr_err("Requested Default Queue Depth: %u larger"
+			" than maximum %u\n", tcq_depth,
+				TA_DEFAULT_CMDSN_DEPTH_MAX);
+		return -EINVAL;
+	} else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
+		pr_err("Requested Default Queue Depth: %u smaller"
+			" than minimum %u\n", tcq_depth,
+				TA_DEFAULT_CMDSN_DEPTH_MIN);
+		return -EINVAL;
+	}
+
+	a->default_cmdsn_depth = tcq_depth;
+	pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
+		tpg->tpgt, a->default_cmdsn_depth);
+
+	return 0;
+}
+
+int iscsit_ta_cache_dynamic_acls(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (a->generate_node_acls == 1 && flag == 0) {
+		pr_debug("Skipping cache_dynamic_acls=0 when"
+			" generate_node_acls=1\n");
+		return 0;
+	}
+
+	a->cache_dynamic_acls = flag;
+	pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+		" ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+		"Enabled" : "Disabled");
+
+	return 0;
+}
+
+int iscsit_ta_demo_mode_write_protect(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->demo_mode_write_protect = flag;
+	pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
+		tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
+
+	return 0;
+}
+
+int iscsit_ta_prod_mode_write_protect(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->prod_mode_write_protect = flag;
+	pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
+		" %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
+		"ON" : "OFF");
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 0000000..dda48c1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
+#ifndef ISCSI_TARGET_TPG_H
+#define ISCSI_TARGET_TPG_H
+
+extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
+extern int iscsit_load_discovery_tpg(void);
+extern void iscsit_release_discovery_tpg(void);
+extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
+			struct iscsi_np *);
+extern int iscsit_get_tpg(struct iscsi_portal_group *);
+extern void iscsit_put_tpg(struct iscsi_portal_group *);
+extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
+extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
+extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
+extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+			int);
+extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
+extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
+extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
+			struct iscsi_portal_group *, const char *, u32);
+extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
+			struct se_node_acl *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+			struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+			int);
+extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+			struct iscsi_tpg_np *);
+extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
+			unsigned char *, u32, int);
+extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+
+#endif /* ISCSI_TARGET_TPG_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 0000000..4f447fd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,527 @@
+/*******************************************************************************
+ * This file contains the iSCSI Login Thread and Thread Queue functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target.h"
+
+static LIST_HEAD(inactive_ts_list);
+static DEFINE_SPINLOCK(inactive_ts_lock);
+static DEFINE_SPINLOCK(ts_bitmap_lock);
+
+extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+{
+	if (!list_empty(&ts->ts_list)) {
+		WARN_ON(1);
+		return;
+	}
+	spin_lock(&inactive_ts_lock);
+	list_add_tail(&ts->ts_list, &inactive_ts_list);
+	iscsit_global->inactive_ts++;
+	spin_unlock(&inactive_ts_lock);
+}
+
+static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+{
+	struct iscsi_thread_set *ts;
+
+	spin_lock(&inactive_ts_lock);
+	if (list_empty(&inactive_ts_list)) {
+		spin_unlock(&inactive_ts_lock);
+		return NULL;
+	}
+
+	list_for_each_entry(ts, &inactive_ts_list, ts_list)
+		break;
+
+	list_del_init(&ts->ts_list);
+	iscsit_global->inactive_ts--;
+	spin_unlock(&inactive_ts_lock);
+
+	return ts;
+}
+
+extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
+{
+	int allocated_thread_pair_count = 0, i, thread_id;
+	struct iscsi_thread_set *ts = NULL;
+
+	for (i = 0; i < thread_pair_count; i++) {
+		ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
+		if (!ts) {
+			pr_err("Unable to allocate memory for"
+					" thread set.\n");
+			return allocated_thread_pair_count;
+		}
+		/*
+		 * Locate the next available region in the thread_set_bitmap
+		 */
+		spin_lock(&ts_bitmap_lock);
+		thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+				iscsit_global->ts_bitmap_count, get_order(1));
+		spin_unlock(&ts_bitmap_lock);
+		if (thread_id < 0) {
+			pr_err("bitmap_find_free_region() failed for"
+				" thread_set_bitmap\n");
+			kfree(ts);
+			return allocated_thread_pair_count;
+		}
+
+		ts->thread_id = thread_id;
+		ts->status = ISCSI_THREAD_SET_FREE;
+		INIT_LIST_HEAD(&ts->ts_list);
+		spin_lock_init(&ts->ts_state_lock);
+		init_completion(&ts->rx_post_start_comp);
+		init_completion(&ts->tx_post_start_comp);
+		init_completion(&ts->rx_restart_comp);
+		init_completion(&ts->tx_restart_comp);
+		init_completion(&ts->rx_start_comp);
+		init_completion(&ts->tx_start_comp);
+
+		ts->create_threads = 1;
+		ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
+					ISCSI_TX_THREAD_NAME);
+		if (IS_ERR(ts->tx_thread)) {
+			dump_stack();
+			pr_err("Unable to start iscsi_target_tx_thread\n");
+			break;
+		}
+
+		ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
+					ISCSI_RX_THREAD_NAME);
+		if (IS_ERR(ts->rx_thread)) {
+			kthread_stop(ts->tx_thread);
+			pr_err("Unable to start iscsi_target_rx_thread\n");
+			break;
+		}
+		ts->create_threads = 0;
+
+		iscsi_add_ts_to_inactive_list(ts);
+		allocated_thread_pair_count++;
+	}
+
+	pr_debug("Spawned %d thread set(s) (%d total threads).\n",
+		allocated_thread_pair_count, allocated_thread_pair_count * 2);
+	return allocated_thread_pair_count;
+}
+
+extern void iscsi_deallocate_thread_sets(void)
+{
+	u32 released_count = 0;
+	struct iscsi_thread_set *ts = NULL;
+
+	while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+		spin_lock_bh(&ts->ts_state_lock);
+		ts->status = ISCSI_THREAD_SET_DIE;
+		spin_unlock_bh(&ts->ts_state_lock);
+
+		if (ts->rx_thread) {
+			send_sig(SIGINT, ts->rx_thread, 1);
+			kthread_stop(ts->rx_thread);
+		}
+		if (ts->tx_thread) {
+			send_sig(SIGINT, ts->tx_thread, 1);
+			kthread_stop(ts->tx_thread);
+		}
+		/*
+		 * Release this thread_id in the thread_set_bitmap
+		 */
+		spin_lock(&ts_bitmap_lock);
+		bitmap_release_region(iscsit_global->ts_bitmap,
+				ts->thread_id, get_order(1));
+		spin_unlock(&ts_bitmap_lock);
+
+		released_count++;
+		kfree(ts);
+	}
+
+	if (released_count)
+		pr_debug("Stopped %d thread set(s) (%d total threads)."
+			"\n", released_count, released_count * 2);
+}
+
+static void iscsi_deallocate_extra_thread_sets(void)
+{
+	u32 orig_count, released_count = 0;
+	struct iscsi_thread_set *ts = NULL;
+
+	orig_count = TARGET_THREAD_SET_COUNT;
+
+	while ((iscsit_global->inactive_ts + 1) > orig_count) {
+		ts = iscsi_get_ts_from_inactive_list();
+		if (!ts)
+			break;
+
+		spin_lock_bh(&ts->ts_state_lock);
+		ts->status = ISCSI_THREAD_SET_DIE;
+		spin_unlock_bh(&ts->ts_state_lock);
+
+		if (ts->rx_thread) {
+			send_sig(SIGINT, ts->rx_thread, 1);
+			kthread_stop(ts->rx_thread);
+		}
+		if (ts->tx_thread) {
+			send_sig(SIGINT, ts->tx_thread, 1);
+			kthread_stop(ts->tx_thread);
+		}
+		/*
+		 * Release this thread_id in the thread_set_bitmap
+		 */
+		spin_lock(&ts_bitmap_lock);
+		bitmap_release_region(iscsit_global->ts_bitmap,
+				ts->thread_id, get_order(1));
+		spin_unlock(&ts_bitmap_lock);
+
+		released_count++;
+		kfree(ts);
+	}
+
+	if (released_count) {
+		pr_debug("Stopped %d thread set(s) (%d total threads)."
+			"\n", released_count, released_count * 2);
+	}
+}
+
+void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+{
+	spin_lock_bh(&ts->ts_state_lock);
+	conn->thread_set = ts;
+	ts->conn = conn;
+	spin_unlock_bh(&ts->ts_state_lock);
+	/*
+	 * Start up the RX thread and wait on rx_post_start_comp.  The RX
+	 * Thread will then do the same for the TX Thread in
+	 * iscsi_rx_thread_pre_handler().
+	 */
+	complete(&ts->rx_start_comp);
+	wait_for_completion(&ts->rx_post_start_comp);
+}
+
+struct iscsi_thread_set *iscsi_get_thread_set(void)
+{
+	int allocate_ts = 0;
+	struct completion comp;
+	struct iscsi_thread_set *ts = NULL;
+	/*
+	 * If no inactive thread set is available on the first call to
+	 * iscsi_get_ts_from_inactive_list(), sleep for a second and
+	 * try again.  If still none are available after two attempts,
+	 * allocate a set ourselves.
+	 */
+get_set:
+	ts = iscsi_get_ts_from_inactive_list();
+	if (!ts) {
+		if (allocate_ts == 2)
+			iscsi_allocate_thread_sets(1);
+
+		init_completion(&comp);
+		wait_for_completion_timeout(&comp, 1 * HZ);
+
+		allocate_ts++;
+		goto get_set;
+	}
+
+	ts->delay_inactive = 1;
+	ts->signal_sent = 0;
+	ts->thread_count = 2;
+	init_completion(&ts->rx_restart_comp);
+	init_completion(&ts->tx_restart_comp);
+
+	return ts;
+}
+
+void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
+{
+	struct iscsi_thread_set *ts = NULL;
+
+	if (!conn->thread_set) {
+		pr_err("struct iscsi_conn->thread_set is NULL\n");
+		return;
+	}
+	ts = conn->thread_set;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	ts->thread_clear &= ~thread_clear;
+
+	if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
+	    (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
+		complete(&ts->rx_restart_comp);
+	else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
+		 (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
+		complete(&ts->tx_restart_comp);
+	spin_unlock_bh(&ts->ts_state_lock);
+}
+
+void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
+{
+	struct iscsi_thread_set *ts = NULL;
+
+	if (!conn->thread_set) {
+		pr_err("struct iscsi_conn->thread_set is NULL\n");
+		return;
+	}
+	ts = conn->thread_set;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	ts->signal_sent |= signal_sent;
+	spin_unlock_bh(&ts->ts_state_lock);
+}
+
+int iscsi_release_thread_set(struct iscsi_conn *conn)
+{
+	int thread_called = 0;
+	struct iscsi_thread_set *ts = NULL;
+
+	if (!conn || !conn->thread_set) {
+		pr_err("connection or thread set pointer is NULL\n");
+		BUG();
+	}
+	ts = conn->thread_set;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	ts->status = ISCSI_THREAD_SET_RESET;
+
+	if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
+			strlen(ISCSI_RX_THREAD_NAME)))
+		thread_called = ISCSI_RX_THREAD;
+	else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
+			strlen(ISCSI_TX_THREAD_NAME)))
+		thread_called = ISCSI_TX_THREAD;
+
+	if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
+	   (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
+
+		if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
+			send_sig(SIGINT, ts->rx_thread, 1);
+			ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+		}
+		ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
+		spin_unlock_bh(&ts->ts_state_lock);
+		wait_for_completion(&ts->rx_restart_comp);
+		spin_lock_bh(&ts->ts_state_lock);
+		ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
+	}
+	if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
+	   (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
+
+		if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
+			send_sig(SIGINT, ts->tx_thread, 1);
+			ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+		}
+		ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
+		spin_unlock_bh(&ts->ts_state_lock);
+		wait_for_completion(&ts->tx_restart_comp);
+		spin_lock_bh(&ts->ts_state_lock);
+		ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
+	}
+
+	ts->conn = NULL;
+	ts->status = ISCSI_THREAD_SET_FREE;
+	spin_unlock_bh(&ts->ts_state_lock);
+
+	return 0;
+}
+
+int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
+{
+	struct iscsi_thread_set *ts;
+
+	if (!conn->thread_set)
+		return -1;
+	ts = conn->thread_set;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
+		spin_unlock_bh(&ts->ts_state_lock);
+		return -1;
+	}
+
+	if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
+		send_sig(SIGINT, ts->tx_thread, 1);
+		ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+	}
+	if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
+		send_sig(SIGINT, ts->rx_thread, 1);
+		ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+	}
+	spin_unlock_bh(&ts->ts_state_lock);
+
+	return 0;
+}
+
+static void iscsi_check_to_add_additional_sets(void)
+{
+	int thread_sets_add;
+
+	spin_lock(&inactive_ts_lock);
+	thread_sets_add = iscsit_global->inactive_ts;
+	spin_unlock(&inactive_ts_lock);
+	if (thread_sets_add == 1)
+		iscsi_allocate_thread_sets(1);
+}
+
+static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+	spin_lock_bh(&ts->ts_state_lock);
+	if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+		spin_unlock_bh(&ts->ts_state_lock);
+		return -1;
+	}
+	spin_unlock_bh(&ts->ts_state_lock);
+
+	return 0;
+}
+
+struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+	int ret;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	if (ts->create_threads) {
+		spin_unlock_bh(&ts->ts_state_lock);
+		goto sleep;
+	}
+
+	flush_signals(current);
+
+	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+		spin_unlock_bh(&ts->ts_state_lock);
+
+		if (!iscsit_global->in_shutdown)
+			iscsi_deallocate_extra_thread_sets();
+
+		iscsi_add_ts_to_inactive_list(ts);
+		spin_lock_bh(&ts->ts_state_lock);
+	}
+
+	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+	    (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
+		complete(&ts->rx_restart_comp);
+
+	ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
+	spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+	ret = wait_for_completion_interruptible(&ts->rx_start_comp);
+	if (ret != 0)
+		return NULL;
+
+	if (iscsi_signal_thread_pre_handler(ts) < 0)
+		return NULL;
+
+	if (!ts->conn) {
+		pr_err("struct iscsi_thread_set->conn is NULL for"
+			" thread_id: %d, going back to sleep\n", ts->thread_id);
+		goto sleep;
+	}
+	iscsi_check_to_add_additional_sets();
+	/*
+	 * The RX Thread starts up the TX Thread and sleeps.
+	 */
+	ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
+	complete(&ts->tx_start_comp);
+	wait_for_completion(&ts->tx_post_start_comp);
+
+	return ts->conn;
+}
+
+struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+	int ret;
+
+	spin_lock_bh(&ts->ts_state_lock);
+	if (ts->create_threads) {
+		spin_unlock_bh(&ts->ts_state_lock);
+		goto sleep;
+	}
+
+	flush_signals(current);
+
+	if (ts->delay_inactive && (--ts->thread_count == 0)) {
+		spin_unlock_bh(&ts->ts_state_lock);
+
+		if (!iscsit_global->in_shutdown)
+			iscsi_deallocate_extra_thread_sets();
+
+		iscsi_add_ts_to_inactive_list(ts);
+		spin_lock_bh(&ts->ts_state_lock);
+	}
+	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+	    (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
+		complete(&ts->tx_restart_comp);
+
+	ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
+	spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+	ret = wait_for_completion_interruptible(&ts->tx_start_comp);
+	if (ret != 0)
+		return NULL;
+
+	if (iscsi_signal_thread_pre_handler(ts) < 0)
+		return NULL;
+
+	if (!ts->conn) {
+		pr_err("struct iscsi_thread_set->conn is NULL for"
+			" thread_id: %d, going back to sleep\n",
+			ts->thread_id);
+		goto sleep;
+	}
+
+	iscsi_check_to_add_additional_sets();
+	/*
+	 * From the TX thread, up the tx_post_start_comp that the RX Thread is
+	 * sleeping on in iscsi_rx_thread_pre_handler(), then up the
+	 * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
+	 */
+	ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
+	complete(&ts->tx_post_start_comp);
+	complete(&ts->rx_post_start_comp);
+
+	spin_lock_bh(&ts->ts_state_lock);
+	ts->status = ISCSI_THREAD_SET_ACTIVE;
+	spin_unlock_bh(&ts->ts_state_lock);
+
+	return ts->conn;
+}
+
+int iscsi_thread_set_init(void)
+{
+	int size;
+
+	iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
+
+	size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
+	iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
+	if (!iscsit_global->ts_bitmap) {
+		pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void iscsi_thread_set_free(void)
+{
+	kfree(iscsit_global->ts_bitmap);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 0000000..26e6a95
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
+#ifndef ISCSI_THREAD_QUEUE_H
+#define ISCSI_THREAD_QUEUE_H
+
+/*
+ * Defines for thread sets.
+ */
+extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
+extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
+extern int iscsi_allocate_thread_sets(u32);
+extern void iscsi_deallocate_thread_sets(void);
+extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
+extern struct iscsi_thread_set *iscsi_get_thread_set(void);
+extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
+extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
+extern int iscsi_release_thread_set(struct iscsi_conn *);
+extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
+extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
+extern int iscsi_thread_set_init(void);
+extern void iscsi_thread_set_free(void);
+
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+
+#define TARGET_THREAD_SET_COUNT			4
+
+#define ISCSI_RX_THREAD                         1
+#define ISCSI_TX_THREAD                         2
+#define ISCSI_RX_THREAD_NAME			"iscsi_trx"
+#define ISCSI_TX_THREAD_NAME			"iscsi_ttx"
+#define ISCSI_BLOCK_RX_THREAD			0x1
+#define ISCSI_BLOCK_TX_THREAD			0x2
+#define ISCSI_CLEAR_RX_THREAD			0x1
+#define ISCSI_CLEAR_TX_THREAD			0x2
+#define ISCSI_SIGNAL_RX_THREAD			0x1
+#define ISCSI_SIGNAL_TX_THREAD			0x2
+
+/* struct iscsi_thread_set->status */
+#define ISCSI_THREAD_SET_FREE			1
+#define ISCSI_THREAD_SET_ACTIVE			2
+#define ISCSI_THREAD_SET_DIE			3
+#define ISCSI_THREAD_SET_RESET			4
+#define ISCSI_THREAD_SET_DEALLOCATE_THREADS	5
+
+/* By default allow a maximum of 32K iSCSI connections */
+#define ISCSI_TS_BITMAP_BITS			32768
+
+struct iscsi_thread_set {
+	/* flags used for blocking and restarting sets */
+	int	blocked_threads;
+	/* flag for creating threads */
+	int	create_threads;
+	/* flag for delaying re-adding to inactive list */
+	int	delay_inactive;
+	/* status for thread set */
+	int	status;
+	/* which threads have had signals sent */
+	int	signal_sent;
+	/* flag for which threads exited first */
+	int	thread_clear;
+	/* Active threads in the thread set */
+	int	thread_count;
+	/* Unique thread ID */
+	u32	thread_id;
+	/* pointer to connection if set is active */
+	struct iscsi_conn	*conn;
+	/* used for controlling ts state accesses */
+	spinlock_t	ts_state_lock;
+	/* Used for rx side post startup */
+	struct completion	rx_post_start_comp;
+	/* Used for tx side post startup */
+	struct completion	tx_post_start_comp;
+	/* used for restarting thread queue */
+	struct completion	rx_restart_comp;
+	/* used for restarting thread queue */
+	struct completion	tx_restart_comp;
+	/* used for normal unused blocking */
+	struct completion	rx_start_comp;
+	/* used for normal unused blocking */
+	struct completion	tx_start_comp;
+	/* OS descriptor for rx thread */
+	struct task_struct	*rx_thread;
+	/* OS descriptor for tx thread */
+	struct task_struct	*tx_thread;
+	/* struct iscsi_thread_set list head */
+	struct list_head	ts_list;
+};
+
+#endif   /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.c b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 0000000..8d57ab3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1629 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific utility functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
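+/* Debug helper: hex-dump 'len' bytes of 'buff' via pr_debug(), 16 bytes per line. */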
+#define PRINT_BUFF(buff, len)					\
+{								\
+	int zzz;						\
+								\
+	pr_debug("%d:\n", __LINE__);				\
+	for (zzz = 0; zzz < len; zzz++) {			\
+		if (zzz % 16 == 0) {				\
+			if (zzz)				\
+				pr_debug("\n");			\
+			pr_debug("%4i: ", zzz);			\
+		}						\
+		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
+	}							\
+	if ((len + 1) % 16)					\
+		pr_debug("\n");					\
+}
+
+extern struct list_head g_tiqn_list;
+extern spinlock_t tiqn_lock;
+
+/*
+ *	Called with cmd->r2t_lock held.
+ */
+int iscsit_add_r2t_to_list(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 xfer_len,
+	int recovery,
+	u32 r2t_sn)
+{
+	struct iscsi_r2t *r2t;
+
+	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
+	if (!r2t) {
+		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&r2t->r2t_list);
+
+	r2t->recovery_r2t = recovery;
+	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
+	r2t->offset = offset;
+	r2t->xfer_len = xfer_len;
+	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
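+	/*
+	 * Drop the caller-held cmd->r2t_lock while queueing the new R2T for
+	 * transmission, then reacquire it before returning to the caller.
+	 */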
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+	spin_lock_bh(&cmd->r2t_lock);
+	return 0;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_for_eos(
+	struct iscsi_cmd *cmd,
+	u32 offset,
+	u32 length)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if ((r2t->offset <= offset) &&
+		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate R2T for Offset: %u, Length:"
+			" %u\n", offset, length);
+	return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (!r2t->sent_r2t) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	pr_err("Unable to locate next R2T to send for ITT:"
+			" 0x%08x.\n", cmd->init_task_tag);
+	return NULL;
+}
+
+/*
+ *	Called with cmd->r2t_lock held.
+ */
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+{
+	list_del(&r2t->r2t_list);
+	kmem_cache_free(lio_r2t_cache, r2t);
+}
+
+void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+{
+	struct iscsi_r2t *r2t, *r2t_tmp;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
+		iscsit_free_r2t(r2t, cmd);
+	spin_unlock_bh(&cmd->r2t_lock);
+}
+
+/*
+ * May be called from software interrupt (timer) context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+{
+	struct iscsi_cmd *cmd;
+
+	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+	if (!cmd) {
+		pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+		return NULL;
+	}
+
+	cmd->conn	= conn;
+	INIT_LIST_HEAD(&cmd->i_list);
+	INIT_LIST_HEAD(&cmd->datain_list);
+	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+	init_completion(&cmd->reject_comp);
+	spin_lock_init(&cmd->datain_lock);
+	spin_lock_init(&cmd->dataout_timeout_lock);
+	spin_lock_init(&cmd->istate_lock);
+	spin_lock_init(&cmd->error_lock);
+	spin_lock_init(&cmd->r2t_lock);
+
+	return cmd;
+}
+
+/*
+ * Called from iscsi_handle_scsi_cmd()
+ */
+struct iscsi_cmd *iscsit_allocate_se_cmd(
+	struct iscsi_conn *conn,
+	u32 data_length,
+	int data_direction,
+	int iscsi_task_attr)
+{
+	struct iscsi_cmd *cmd;
+	struct se_cmd *se_cmd;
+	int sam_task_attr;
+
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	if (!cmd)
+		return NULL;
+
+	cmd->data_direction = data_direction;
+	cmd->data_length = data_length;
+	/*
+	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
+	 */
+	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+		sam_task_attr = MSG_SIMPLE_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+		sam_task_attr = MSG_ORDERED_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+		sam_task_attr = MSG_HEAD_TAG;
+	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+		sam_task_attr = MSG_ACA_TAG;
+	else {
+		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
+		sam_task_attr = MSG_SIMPLE_TAG;
+	}
+
+	se_cmd = &cmd->se_cmd;
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+			conn->sess->se_sess, data_length, data_direction,
+			sam_task_attr, &cmd->sense_buffer[0]);
+	return cmd;
+}
+
+struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
+	struct iscsi_conn *conn,
+	u8 function)
+{
+	struct iscsi_cmd *cmd;
+	struct se_cmd *se_cmd;
+	int rc;
+	u8 tcm_function;
+
+	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+	if (!cmd)
+		return NULL;
+
+	cmd->data_direction = DMA_NONE;
+
+	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+	if (!cmd->tmr_req) {
+		pr_err("Unable to allocate memory for"
+			" Task Management command!\n");
+		goto out;
+	}
+	/*
+	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+	 * LIO-Target $FABRIC_MOD
+	 */
+	if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
+		return cmd;
+
+	se_cmd = &cmd->se_cmd;
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+				conn->sess->se_sess, 0, DMA_NONE,
+				MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+	switch (function) {
+	case ISCSI_TM_FUNC_ABORT_TASK:
+		tcm_function = TMR_ABORT_TASK;
+		break;
+	case ISCSI_TM_FUNC_ABORT_TASK_SET:
+		tcm_function = TMR_ABORT_TASK_SET;
+		break;
+	case ISCSI_TM_FUNC_CLEAR_ACA:
+		tcm_function = TMR_CLEAR_ACA;
+		break;
+	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+		tcm_function = TMR_CLEAR_TASK_SET;
+		break;
+	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+		tcm_function = TMR_LUN_RESET;
+		break;
+	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+		tcm_function = TMR_TARGET_WARM_RESET;
+		break;
+	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+		tcm_function = TMR_TARGET_COLD_RESET;
+		break;
+	default:
+		pr_err("Unknown iSCSI TMR Function:"
+			" 0x%02x\n", function);
+		goto out;
+	}
+
+	rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
+	if (rc < 0)
+		goto out;
+
+	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
+
+	return cmd;
+out:
+	iscsit_release_cmd(cmd);
+	return NULL;
+}
+
+int iscsit_decide_list_to_build(
+	struct iscsi_cmd *cmd,
+	u32 immediate_data_length)
+{
+	struct iscsi_build_list bl;
+	struct iscsi_conn *conn = cmd->conn;
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na;
+
+	if (sess->sess_ops->DataSequenceInOrder &&
+	    sess->sess_ops->DataPDUInOrder)
+		return 0;
+
+	if (cmd->data_direction == DMA_NONE)
+		return 0;
+
+	na = iscsit_tpg_get_node_attrib(sess);
+	memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+	if (cmd->data_direction == DMA_FROM_DEVICE) {
+		bl.data_direction = ISCSI_PDU_READ;
+		bl.type = PDULIST_NORMAL;
+		if (na->random_datain_pdu_offsets)
+			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+		if (na->random_datain_seq_offsets)
+			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+	} else {
+		bl.data_direction = ISCSI_PDU_WRITE;
+		bl.immediate_data_length = immediate_data_length;
+		if (na->random_r2t_offsets)
+			bl.randomize |= RANDOM_R2T_OFFSETS;
+
+		if (!cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_NORMAL;
+		else if (cmd->immediate_data && !cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE;
+		else if (!cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_UNSOLICITED;
+		else if (cmd->immediate_data && cmd->unsolicited_data)
+			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+	}
+
+	return iscsit_do_build_list(cmd, &bl);
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+	struct iscsi_cmd *cmd,
+	u32 seq_send_order)
+{
+	u32 i;
+
+	for (i = 0; i < cmd->seq_count; i++)
+		if (cmd->seq_list[i].seq_send_order == seq_send_order)
+			return &cmd->seq_list[i];
+
+	return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+{
+	u32 i;
+
+	if (!cmd->seq_list) {
+		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+		return NULL;
+	}
+
+	for (i = 0; i < cmd->seq_count; i++) {
+		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+			continue;
+		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+			cmd->seq_send_order++;
+			return &cmd->seq_list[i];
+		}
+	}
+
+	return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+	struct iscsi_cmd *cmd,
+	u32 r2t_sn)
+{
+	struct iscsi_r2t *r2t;
+
+	spin_lock_bh(&cmd->r2t_lock);
+	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+		if (r2t->r2t_sn == r2t_sn) {
+			spin_unlock_bh(&cmd->r2t_lock);
+			return r2t;
+		}
+	}
+	spin_unlock_bh(&cmd->r2t_lock);
+
+	return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+{
+	int ret;
+
+	/*
+	 * This is the proper method of checking received CmdSN against
+	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+	 * of order CmdSNs due to multiple connection sessions and/or
+	 * CRC failures.
+	 */
+	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+		pr_err("Received CmdSN: 0x%08x is greater than"
+		       " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+		       sess->max_cmd_sn);
+		ret = CMDSN_ERROR_CANNOT_RECOVER;
+
+	} else if (cmdsn == sess->exp_cmd_sn) {
+		sess->exp_cmd_sn++;
+		pr_debug("Received CmdSN matches ExpCmdSN,"
+		      " incremented ExpCmdSN to: 0x%08x\n",
+		      sess->exp_cmd_sn);
+		ret = CMDSN_NORMAL_OPERATION;
+
+	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+		pr_debug("Received CmdSN: 0x%08x is greater"
+		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
+		      cmdsn, sess->exp_cmd_sn);
+		ret = CMDSN_HIGHER_THAN_EXP;
+
+	} else {
+		pr_err("Received CmdSN: 0x%08x is less than"
+		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+		       sess->exp_cmd_sn);
+		ret = CMDSN_LOWER_THAN_EXP;
+	}
+
+	return ret;
+}
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
+ */
+int iscsit_sequence_cmd(
+	struct iscsi_conn *conn,
+	struct iscsi_cmd *cmd,
+	u32 cmdsn)
+{
+	int ret;
+	int cmdsn_ret;
+
+	mutex_lock(&conn->sess->cmdsn_mutex);
+
+	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
+	switch (cmdsn_ret) {
+	case CMDSN_NORMAL_OPERATION:
+		ret = iscsit_execute_cmd(cmd, 0);
+		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
+			iscsit_execute_ooo_cmdsns(conn->sess);
+		break;
+	case CMDSN_HIGHER_THAN_EXP:
+		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
+		break;
+	case CMDSN_LOWER_THAN_EXP:
+		cmd->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+		ret = cmdsn_ret;
+		break;
+	default:
+		ret = cmdsn_ret;
+		break;
+	}
+	mutex_unlock(&conn->sess->cmdsn_mutex);
+
+	return ret;
+}
+
+int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct iscsi_data *hdr = (struct iscsi_data *) buf;
+	u32 payload_length = ntoh24(hdr->dlength);
+
+	if (conn->sess->sess_ops->InitialR2T) {
+		pr_err("Received unexpected unsolicited data"
+			" while InitialR2T=Yes, protocol error.\n");
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+		return -1;
+	}
+
+	if ((cmd->first_burst_len + payload_length) >
+	     conn->sess->sess_ops->FirstBurstLength) {
+		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+			" for this Unsolicited DataOut Burst.\n",
+			(cmd->first_burst_len + payload_length),
+				conn->sess->sess_ops->FirstBurstLength);
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return -1;
+	}
+
+	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
+		return 0;
+
+	if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+	    ((cmd->first_burst_len + payload_length) !=
+	      conn->sess->sess_ops->FirstBurstLength)) {
+		pr_err("Unsolicited non-immediate data received %u"
+			" does not equal FirstBurstLength: %u, and does"
+			" not equal ExpXferLen %u.\n",
+			(cmd->first_burst_len + payload_length),
+			conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+		return -1;
+	}
+	return 0;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt(
+	struct iscsi_conn *conn,
+	u32 init_task_tag)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		if (cmd->init_task_tag == init_task_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
+			init_task_tag, conn->cid);
+	return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
+	struct iscsi_conn *conn,
+	u32 init_task_tag,
+	u32 length)
+{
+	struct iscsi_cmd *cmd;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		if (cmd->init_task_tag == init_task_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+			" dumping payload\n", init_task_tag, conn->cid);
+	if (length)
+		iscsit_dump_data_payload(conn, length, 1);
+
+	return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_ttt(
+	struct iscsi_conn *conn,
+	u32 targ_xfer_tag)
+{
+	struct iscsi_cmd *cmd = NULL;
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+		if (cmd->targ_xfer_tag == targ_xfer_tag) {
+			spin_unlock_bh(&conn->cmd_lock);
+			return cmd;
+		}
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+			targ_xfer_tag, conn->cid);
+	return NULL;
+}
+
+int iscsit_find_cmd_for_recovery(
+	struct iscsi_session *sess,
+	struct iscsi_cmd **cmd_ptr,
+	struct iscsi_conn_recovery **cr_ptr,
+	u32 init_task_tag)
+{
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_conn_recovery *cr;
+	/*
+	 * Scan through the inactive connection recovery list's command list.
+	 * If init_task_tag matches, the command is still allegiant.
+	 */
+	spin_lock(&sess->cr_i_lock);
+	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+			if (cmd->init_task_tag == init_task_tag) {
+				spin_unlock(&cr->conn_recovery_cmd_lock);
+				spin_unlock(&sess->cr_i_lock);
+
+				*cr_ptr = cr;
+				*cmd_ptr = cmd;
+				return -2;
+			}
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&sess->cr_i_lock);
+	/*
+	 * Scan through the active connection recovery list's command list.
+	 * If init_task_tag matches, the command is ready to be reassigned.
+	 */
+	spin_lock(&sess->cr_a_lock);
+	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
+		spin_lock(&cr->conn_recovery_cmd_lock);
+		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+			if (cmd->init_task_tag == init_task_tag) {
+				spin_unlock(&cr->conn_recovery_cmd_lock);
+				spin_unlock(&sess->cr_a_lock);
+
+				*cr_ptr = cr;
+				*cmd_ptr = cmd;
+				return 0;
+			}
+		}
+		spin_unlock(&cr->conn_recovery_cmd_lock);
+	}
+	spin_unlock(&sess->cr_a_lock);
+
+	return -1;
+}
+
+void iscsit_add_cmd_to_immediate_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	u8 state)
+{
+	struct iscsi_queue_req *qr;
+
+	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+	if (!qr) {
+		pr_err("Unable to allocate memory for"
+				" struct iscsi_queue_req\n");
+		return;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+	qr->cmd = cmd;
+	qr->state = state;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
+	atomic_inc(&cmd->immed_queue_count);
+	atomic_set(&conn->check_immediate_queue, 1);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	wake_up(&conn->queues_wq);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	if (list_empty(&conn->immed_queue_list)) {
+		spin_unlock_bh(&conn->immed_queue_lock);
+		return NULL;
+	}
+	list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
+		break;
+
+	list_del(&qr->qr_list);
+	if (qr->cmd)
+		atomic_dec(&qr->cmd->immed_queue_count);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	return qr;
+}
+
+static void iscsit_remove_cmd_from_immediate_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	if (!atomic_read(&cmd->immed_queue_count)) {
+		spin_unlock_bh(&conn->immed_queue_lock);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+		if (qr->cmd != cmd)
+			continue;
+
+		atomic_dec(&qr->cmd->immed_queue_count);
+		list_del(&qr->qr_list);
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	if (atomic_read(&cmd->immed_queue_count)) {
+		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
+			cmd->init_task_tag,
+			atomic_read(&cmd->immed_queue_count));
+	}
+}
+
+void iscsit_add_cmd_to_response_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	u8 state)
+{
+	struct iscsi_queue_req *qr;
+
+	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+	if (!qr) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_queue_req\n");
+		return;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+	qr->cmd = cmd;
+	qr->state = state;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	list_add_tail(&qr->qr_list, &conn->response_queue_list);
+	atomic_inc(&cmd->response_queue_count);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	wake_up(&conn->queues_wq);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	if (list_empty(&conn->response_queue_list)) {
+		spin_unlock_bh(&conn->response_queue_lock);
+		return NULL;
+	}
+
+	list_for_each_entry(qr, &conn->response_queue_list, qr_list)
+		break;
+
+	list_del(&qr->qr_list);
+	if (qr->cmd)
+		atomic_dec(&qr->cmd->response_queue_count);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	return qr;
+}
+
+static void iscsit_remove_cmd_from_response_queue(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	if (!atomic_read(&cmd->response_queue_count)) {
+		spin_unlock_bh(&conn->response_queue_lock);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+				qr_list) {
+		if (qr->cmd != cmd)
+			continue;
+
+		atomic_dec(&qr->cmd->response_queue_count);
+		list_del(&qr->qr_list);
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	if (atomic_read(&cmd->response_queue_count)) {
+		pr_err("ITT: 0x%08x response_queue_count: %d\n",
+			cmd->init_task_tag,
+			atomic_read(&cmd->response_queue_count));
+	}
+}
+
+bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+{
+	bool empty;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	empty = list_empty(&conn->immed_queue_list);
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	if (!empty)
+		return empty;
+
+	spin_lock_bh(&conn->response_queue_lock);
+	empty = list_empty(&conn->response_queue_list);
+	spin_unlock_bh(&conn->response_queue_lock);
+
+	return empty;
+}
+
+void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+{
+	struct iscsi_queue_req *qr, *qr_tmp;
+
+	spin_lock_bh(&conn->immed_queue_lock);
+	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+		list_del(&qr->qr_list);
+		if (qr->cmd)
+			atomic_dec(&qr->cmd->immed_queue_count);
+
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->immed_queue_lock);
+
+	spin_lock_bh(&conn->response_queue_lock);
+	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+			qr_list) {
+		list_del(&qr->qr_list);
+		if (qr->cmd)
+			atomic_dec(&qr->cmd->response_queue_count);
+
+		kmem_cache_free(lio_qr_cache, qr);
+	}
+	spin_unlock_bh(&conn->response_queue_lock);
+}
+
+void iscsit_release_cmd(struct iscsi_cmd *cmd)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	int i;
+
+	iscsit_free_r2ts_from_list(cmd);
+	iscsit_free_all_datain_reqs(cmd);
+
+	kfree(cmd->buf_ptr);
+	kfree(cmd->pdu_list);
+	kfree(cmd->seq_list);
+	kfree(cmd->tmr_req);
+	kfree(cmd->iov_data);
+
+	for (i = 0; i < cmd->t_mem_sg_nents; i++)
+		__free_page(sg_page(&cmd->t_mem_sg[i]));
+
+	kfree(cmd->t_mem_sg);
+
+	if (conn) {
+		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
+		iscsit_remove_cmd_from_response_queue(cmd, conn);
+	}
+
+	kmem_cache_free(lio_cmd_cache, cmd);
+}
+
+void iscsit_free_cmd(struct iscsi_cmd *cmd)
+{
+	/*
+	 * Determine if a struct se_cmd is associated with
+	 * this struct iscsi_cmd.
+	 */
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+	case ISCSI_OP_SCSI_TMFUNC:
+		transport_generic_free_cmd(&cmd->se_cmd, 1);
+		break;
+	case ISCSI_OP_REJECT:
+		/*
+		 * Handle special case for REJECT when iscsi_add_reject*() has
+		 * overwritten the original iscsi_opcode assignment, and the
+		 * associated cmd->se_cmd needs to be released.
+		 */
+		if (cmd->se_cmd.se_tfo != NULL) {
+			transport_generic_free_cmd(&cmd->se_cmd, 1);
+			break;
+		}
+		/* Fall-through */
+	default:
+		iscsit_release_cmd(cmd);
+		break;
+	}
+}
+
+int iscsit_check_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	if (sess->session_usage_count != 0) {
+		sess->session_waiting_on_uc = 1;
+		spin_unlock_bh(&sess->session_usage_lock);
+		if (in_interrupt())
+			return 2;
+
+		wait_for_completion(&sess->session_waiting_on_uc_comp);
+		return 1;
+	}
+	spin_unlock_bh(&sess->session_usage_lock);
+
+	return 0;
+}
+
+void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	sess->session_usage_count--;
+
+	if (!sess->session_usage_count && sess->session_waiting_on_uc)
+		complete(&sess->session_waiting_on_uc_comp);
+
+	spin_unlock_bh(&sess->session_usage_lock);
+}
+
+void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+{
+	spin_lock_bh(&sess->session_usage_lock);
+	sess->session_usage_count++;
+	spin_unlock_bh(&sess->session_usage_lock);
+}
+
+/*
+ *	Set up conn->if_marker and conn->of_marker values based upon
+ *	the initial marker-less interval. (see iSCSI v19 A.2)
+ */
+int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
+{
+	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
+	/*
+	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
+	 */
+	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
+	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
+
+	if (conn->conn_ops->OFMarker) {
+		/*
+		 * Account for the first Login Command received not
+		 * via iscsi_recv_msg().
+		 */
+		conn->of_marker += ISCSI_HDR_LEN;
+		if (conn->of_marker <= OFMarkInt) {
+			conn->of_marker = (OFMarkInt - conn->of_marker);
+		} else {
+			login_ofmarker_count = (conn->of_marker / OFMarkInt);
+			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
+					(login_ofmarker_count * MARKER_SIZE);
+			conn->of_marker = (next_marker - conn->of_marker);
+		}
+		conn->of_marker_offset = 0;
+		pr_debug("Setting OFMarker value to %u based on Initial"
+			" Markerless Interval.\n", conn->of_marker);
+	}
+
+	if (conn->conn_ops->IFMarker) {
+		if (conn->if_marker <= IFMarkInt) {
+			conn->if_marker = (IFMarkInt - conn->if_marker);
+		} else {
+			login_ifmarker_count = (conn->if_marker / IFMarkInt);
+			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
+					(login_ifmarker_count * MARKER_SIZE);
+			conn->if_marker = (next_marker - conn->if_marker);
+		}
+		pr_debug("Setting IFMarker value to %u based on Initial"
+			" Markerless Interval.\n", conn->if_marker);
+	}
+
+	return 0;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		if ((conn->cid == cid) &&
+		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
+			iscsit_inc_conn_usage_count(conn);
+			spin_unlock_bh(&sess->conn_lock);
+			return conn;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	return NULL;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+{
+	struct iscsi_conn *conn;
+
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+		if (conn->cid == cid) {
+			iscsit_inc_conn_usage_count(conn);
+			spin_lock(&conn->state_lock);
+			atomic_set(&conn->connection_wait_rcfr, 1);
+			spin_unlock(&conn->state_lock);
+			spin_unlock_bh(&sess->conn_lock);
+			return conn;
+		}
+	}
+	spin_unlock_bh(&sess->conn_lock);
+
+	return NULL;
+}
+
+void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	if (conn->conn_usage_count != 0) {
+		conn->conn_waiting_on_uc = 1;
+		spin_unlock_bh(&conn->conn_usage_lock);
+
+		wait_for_completion(&conn->conn_waiting_on_uc_comp);
+		return;
+	}
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	conn->conn_usage_count--;
+
+	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
+		complete(&conn->conn_waiting_on_uc_comp);
+
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->conn_usage_lock);
+	conn->conn_usage_count++;
+	spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+{
+	u8 state;
+	struct iscsi_cmd *cmd;
+
+	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+	if (!cmd)
+		return -1;
+
+	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
+	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
+				ISTATE_SEND_NOPIN_NO_RESPONSE;
+	cmd->init_task_tag = 0xFFFFFFFF;
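+	/*
+	 * Allocate the next Target Transfer Tag under ttt_lock; 0xFFFFFFFF is
+	 * the reserved TTT, so skip it when a NopOUT response is expected and
+	 * use it directly when no response is requested.
+	 */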
+	spin_lock_bh(&conn->sess->ttt_lock);
+	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
+			0xFFFFFFFF;
+	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
+		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+	spin_unlock_bh(&conn->sess->ttt_lock);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+	spin_unlock_bh(&conn->cmd_lock);
+
+	if (want_response)
+		iscsit_start_nopin_response_timer(conn);
+	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
+
+	return 0;
+}
+
+static void iscsit_handle_nopin_response_timeout(unsigned long data)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+
+	pr_debug("Did not receive response to NOPIN on CID: %hu on"
+		" SID: %u, failing connection.\n", conn->cid,
+			conn->sess->sid);
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	{
+	struct iscsi_portal_group *tpg = conn->sess->tpg;
+	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+	if (tiqn) {
+		spin_lock_bh(&tiqn->sess_err_stats.lock);
+		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+				conn->sess->sess_ops->InitiatorName);
+		tiqn->sess_err_stats.last_sess_failure_type =
+				ISCSI_SESS_ERR_CXN_TIMEOUT;
+		tiqn->sess_err_stats.cxn_timeout_errors++;
+		conn->sess->conn_timeout_errors++;
+		spin_unlock_bh(&tiqn->sess_err_stats.lock);
+	}
+	}
+
+	iscsit_cause_connection_reinstatement(conn, 0);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	mod_timer(&conn->nopin_response_timer,
+		(get_jiffies_64() + na->nopin_response_timeout * HZ));
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+/*
+ *	Called with conn->nopin_timer_lock held.
+ */
+void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	init_timer(&conn->nopin_response_timer);
+	conn->nopin_response_timer.expires =
+		(get_jiffies_64() + na->nopin_response_timeout * HZ);
+	conn->nopin_response_timer.data = (unsigned long)conn;
+	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_response_timer);
+
+	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
+		" seconds\n", conn->cid, na->nopin_response_timeout);
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
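+	/*
+	 * Drop nopin_timer_lock before del_timer_sync(), since the timer
+	 * handler takes the same lock; ISCSI_TF_STOP tells a concurrently
+	 * running handler to bail out early.
+	 */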
+	del_timer_sync(&conn->nopin_response_timer);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+static void iscsit_handle_nopin_timeout(unsigned long data)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+	iscsit_inc_conn_usage_count(conn);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		iscsit_dec_conn_usage_count(conn);
+		return;
+	}
+	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	iscsit_add_nopin(conn, 1);
+	iscsit_dec_conn_usage_count(conn);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+	/*
+	 * NOPIN timeout is disabled.
+	 */
+	if (!na->nopin_timeout)
+		return;
+
+	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
+		return;
+
+	init_timer(&conn->nopin_timer);
+	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+	conn->nopin_timer.data = (unsigned long)conn;
+	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_timer);
+
+	pr_debug("Started NOPIN Timer on CID: %d at %u second"
+		" interval\n", conn->cid, na->nopin_timeout);
+}
+
+void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+	struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+	/*
+	 * NOPIN timeout is disabled.
+	 */
+	if (!na->nopin_timeout)
+		return;
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+
+	init_timer(&conn->nopin_timer);
+	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+	conn->nopin_timer.data = (unsigned long)conn;
+	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+	add_timer(&conn->nopin_timer);
+
+	pr_debug("Started NOPIN Timer on CID: %d at %u second"
+			" interval\n", conn->cid, na->nopin_timeout);
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->nopin_timer_lock);
+	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
+		spin_unlock_bh(&conn->nopin_timer_lock);
+		return;
+	}
+	conn->nopin_timer_flags |= ISCSI_TF_STOP;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+
+	del_timer_sync(&conn->nopin_timer);
+
+	spin_lock_bh(&conn->nopin_timer_lock);
+	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+	spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+int iscsit_send_tx_data(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn,
+	int use_misc)
+{
+	int tx_sent, tx_size;
+	u32 iov_count;
+	struct kvec *iov;
+
+send_data:
+	tx_size = cmd->tx_size;
+
+	if (!use_misc) {
+		iov = &cmd->iov_data[0];
+		iov_count = cmd->iov_data_count;
+	} else {
+		iov = &cmd->iov_misc[0];
+		iov_count = cmd->iov_misc_count;
+	}
+
+	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
+	if (tx_size != tx_sent) {
+		if (tx_sent == -EAGAIN) {
+			pr_err("tx_data() returned -EAGAIN\n");
+			goto send_data;
+		} else
+			return -1;
+	}
+	cmd->tx_size = 0;
+
+	return 0;
+}
+
+int iscsit_fe_sendpage_sg(
+	struct iscsi_cmd *cmd,
+	struct iscsi_conn *conn)
+{
+	struct scatterlist *sg = cmd->first_data_sg;
+	struct kvec iov;
+	u32 tx_hdr_size, data_len;
+	u32 offset = cmd->first_data_sg_off;
+	int tx_sent, iov_off;
+
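+	/*
+	 * Transmit order: iSCSI header (plus optional header digest), payload
+	 * pages straight from the scatterlist via sendpage(), then any pad
+	 * bytes and the optional data digest from cmd->iov_data[].
+	 */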
+send_hdr:
+	tx_hdr_size = ISCSI_HDR_LEN;
+	if (conn->conn_ops->HeaderDigest)
+		tx_hdr_size += ISCSI_CRC_LEN;
+
+	iov.iov_base = cmd->pdu;
+	iov.iov_len = tx_hdr_size;
+
+	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
+	if (tx_hdr_size != tx_sent) {
+		if (tx_sent == -EAGAIN) {
+			pr_err("tx_data() returned -EAGAIN\n");
+			goto send_hdr;
+		}
+		return -1;
+	}
+
+	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
+	/*
+	 * Set iov_off used by padding and data digest tx_data() calls below
+	 * in order to determine proper offset into cmd->iov_data[]
+	 */
+	if (conn->conn_ops->DataDigest) {
+		data_len -= ISCSI_CRC_LEN;
+		if (cmd->padding)
+			iov_off = (cmd->iov_data_count - 2);
+		else
+			iov_off = (cmd->iov_data_count - 1);
+	} else {
+		iov_off = (cmd->iov_data_count - 1);
+	}
+	/*
+	 * Perform sendpage() for each page in the scatterlist
+	 */
+	while (data_len) {
+		u32 space = (sg->length - offset);
+		u32 sub_len = min_t(u32, data_len, space);
+send_pg:
+		tx_sent = conn->sock->ops->sendpage(conn->sock,
+					sg_page(sg), sg->offset + offset, sub_len, 0);
+		if (tx_sent != sub_len) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tcp_sendpage() returned"
+						" -EAGAIN\n");
+				goto send_pg;
+			}
+
+			pr_err("tcp_sendpage() failure: %d\n",
+					tx_sent);
+			return -1;
+		}
+
+		data_len -= sub_len;
+		offset = 0;
+		sg = sg_next(sg);
+	}
+
+send_padding:
+	if (cmd->padding) {
+		struct kvec *iov_p = &cmd->iov_data[iov_off++];
+
+		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
+		if (cmd->padding != tx_sent) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tx_data() returned -EAGAIN\n");
+				goto send_padding;
+			}
+			return -1;
+		}
+	}
+
+send_datacrc:
+	if (conn->conn_ops->DataDigest) {
+		struct kvec *iov_d = &cmd->iov_data[iov_off];
+
+		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
+		if (ISCSI_CRC_LEN != tx_sent) {
+			if (tx_sent == -EAGAIN) {
+				pr_err("tx_data() returned -EAGAIN\n");
+				goto send_datacrc;
+			}
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *      This function is mainly used to send an ISCSI_TARG_LOGIN_RSP PDU
+ *      back to the Initiator when an exception condition occurs, with the
+ *      errors set in status_class and status_detail.
+ *
+ *      Parameters:     iSCSI Connection, Status Class, Status Detail.
+ *      Returns:        0 on success, -1 on error.
+ */
+int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+{
+	u8 iscsi_hdr[ISCSI_HDR_LEN];
+	int err;
+	struct kvec iov;
+	struct iscsi_login_rsp *hdr;
+
+	iscsit_collect_login_stats(conn, status_class, status_detail);
+
+	memset(&iov, 0, sizeof(struct kvec));
+	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
+
+	hdr	= (struct iscsi_login_rsp *)&iscsi_hdr;
+	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
+	hdr->status_class	= status_class;
+	hdr->status_detail	= status_detail;
+	hdr->itt		= cpu_to_be32(conn->login_itt);
+
+	iov.iov_base		= &iscsi_hdr;
+	iov.iov_len		= ISCSI_HDR_LEN;
+
+	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
+
+	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+	if (err != ISCSI_HDR_LEN) {
+		pr_err("tx_data returned less than expected\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+void iscsit_print_session_params(struct iscsi_session *sess)
+{
+	struct iscsi_conn *conn;
+
+	pr_debug("-----------------------------[Session Params for"
+		" SID: %u]-----------------------------\n", sess->sid);
+	spin_lock_bh(&sess->conn_lock);
+	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+		iscsi_dump_conn_ops(conn->conn_ops);
+	spin_unlock_bh(&sess->conn_lock);
+
+	iscsi_dump_sess_ops(sess->sess_ops);
+}
+
+static int iscsit_do_rx_data(
+	struct iscsi_conn *conn,
+	struct iscsi_data_count *count)
+{
+	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
+	struct kvec *iov_p;
+	struct msghdr msg;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&msg, 0, sizeof(struct msghdr));
+
+	iov_p = count->iov;
+	iov_len	= count->iov_count;
+
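+	/*
+	 * kernel_recvmsg() may return fewer bytes than requested, so loop
+	 * until the full data_length has been received or an error occurs.
+	 */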
+	while (total_rx < data) {
+		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
+					(data - total_rx), MSG_WAITALL);
+		if (rx_loop <= 0) {
+			pr_debug("rx_loop: %d total_rx: %d\n",
+				rx_loop, total_rx);
+			return rx_loop;
+		}
+		total_rx += rx_loop;
+		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
+				rx_loop, total_rx, data);
+	}
+
+	return total_rx;
+}
+
+static int iscsit_do_tx_data(
+	struct iscsi_conn *conn,
+	struct iscsi_data_count *count)
+{
+	int ret, iov_len;
+	struct kvec *iov_p;
+	struct msghdr msg;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	if (count->data_length <= 0) {
+		pr_err("Data length is: %d\n", count->data_length);
+		return -1;
+	}
+
+	memset(&msg, 0, sizeof(struct msghdr));
+
+	iov_p = count->iov;
+	iov_len = count->iov_count;
+
+	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+			     count->data_length);
+	if (ret != count->data_length) {
+		pr_err("Unexpected ret: %d send data %d\n",
+		       ret, count->data_length);
+		return -EPIPE;
+	}
+	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
+
+	return ret;
+}
+
+int rx_data(
+	struct iscsi_conn *conn,
+	struct kvec *iov,
+	int iov_count,
+	int data)
+{
+	struct iscsi_data_count c;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&c, 0, sizeof(struct iscsi_data_count));
+	c.iov = iov;
+	c.iov_count = iov_count;
+	c.data_length = data;
+	c.type = ISCSI_RX_DATA;
+
+	return iscsit_do_rx_data(conn, &c);
+}
+
+int tx_data(
+	struct iscsi_conn *conn,
+	struct kvec *iov,
+	int iov_count,
+	int data)
+{
+	struct iscsi_data_count c;
+
+	if (!conn || !conn->sock || !conn->conn_ops)
+		return -1;
+
+	memset(&c, 0, sizeof(struct iscsi_data_count));
+	c.iov = iov;
+	c.iov_count = iov_count;
+	c.data_length = data;
+	c.type = ISCSI_TX_DATA;
+
+	return iscsit_do_tx_data(conn, &c);
+}
+
+void iscsit_collect_login_stats(
+	struct iscsi_conn *conn,
+	u8 status_class,
+	u8 status_detail)
+{
+	struct iscsi_param *intrname = NULL;
+	struct iscsi_tiqn *tiqn;
+	struct iscsi_login_stats *ls;
+
+	tiqn = iscsit_snmp_get_tiqn(conn);
+	if (!tiqn)
+		return;
+
+	ls = &tiqn->login_stats;
+
+	spin_lock(&ls->lock);
+	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
+		/* We already have the failure info for this login */
+		spin_unlock(&ls->lock);
+		return;
+	}
+
+	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
+		ls->accepts++;
+	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
+		ls->redirects++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
+		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
+		ls->authenticate_fails++;
+		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
+		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
+		ls->authorize_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
+	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
+		ls->negotiate_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
+	} else {
+		ls->other_fails++;
+		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
+	}
+
+	/* Save initiator name, ip address and time, if it is a failed login */
+	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
+		if (conn->param_list)
+			intrname = iscsi_find_param_from_key(INITIATORNAME,
+							     conn->param_list);
+		strcpy(ls->last_intr_fail_name,
+		       (intrname ? intrname->value : "Unknown"));
+
+		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
+				"%s", conn->login_ip);
+		ls->last_fail_time = get_jiffies_64();
+	}
+
+	spin_unlock(&ls->lock);
+}
+
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+{
+	struct iscsi_portal_group *tpg;
+
+	if (!conn || !conn->sess)
+		return NULL;
+
+	tpg = conn->sess->tpg;
+	if (!tpg)
+		return NULL;
+
+	if (!tpg->tpg_tiqn)
+		return NULL;
+
+	return tpg->tpg_tiqn;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.h b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 0000000..cfac698
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,62 @@
+#ifndef ISCSI_TARGET_UTIL_H
+#define ISCSI_TARGET_UTIL_H
+
+#define MARKER_SIZE	8
+
+extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
+extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
+extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+			u32, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+			struct iscsi_conn_recovery **, u32);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *);
+extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+extern void iscsit_inc_session_usage_count(struct iscsi_session *);
+extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
+extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
+extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsi_session *);
+extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
+extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/loopback/Kconfig b/ap/os/linux/linux-3.4.x/drivers/target/loopback/Kconfig
new file mode 100644
index 0000000..abe8ecb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/loopback/Kconfig
@@ -0,0 +1,5 @@
+config LOOPBACK_TARGET
+	tristate "TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module"
+	help
+	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
+	  fabric loopback module.
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/loopback/Makefile b/ap/os/linux/linux-3.4.x/drivers/target/loopback/Makefile
new file mode 100644
index 0000000..6abebdf
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/loopback/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LOOPBACK_TARGET)	+= tcm_loop.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.c b/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.c
new file mode 100644
index 0000000..a9b4eee
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.c
@@ -0,0 +1,1484 @@
+/*******************************************************************************
+ *
+ * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
+ * for emulated SAS initiator ports
+ *
+ * © Copyright 2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+
+#include "tcm_loop.h"
+
+#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+
+static struct workqueue_struct *tcm_loop_workqueue;
+static struct kmem_cache *tcm_loop_cmd_cache;
+
+static int tcm_loop_hba_no_cnt;
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd);
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free()
+ */
+static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
+{
+	/*
+	 * Do not release struct se_cmd's containing a valid TMR
+	 * pointer.  These will be released directly in tcm_loop_device_reset()
+	 * with transport_generic_free_cmd().
+	 */
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		return 0;
+	/*
+	 * Release the struct se_cmd, which will make a callback to release
+	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
+	 */
+	transport_generic_free_cmd(se_cmd, 0);
+	return 1;
+}
+
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+
+	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+}
+
+static int tcm_loop_proc_info(struct Scsi_Host *host, char *buffer,
+				char **start, off_t offset,
+				int length, int inout)
+{
+	return sprintf(buffer, "tcm_loop_proc_info()\n");
+}
+
+static int tcm_loop_driver_probe(struct device *);
+static int tcm_loop_driver_remove(struct device *);
+
+static int pseudo_lld_bus_match(struct device *dev,
+				struct device_driver *dev_driver)
+{
+	return 1;
+}
+
+static struct bus_type tcm_loop_lld_bus = {
+	.name			= "tcm_loop_bus",
+	.match			= pseudo_lld_bus_match,
+	.probe			= tcm_loop_driver_probe,
+	.remove			= tcm_loop_driver_remove,
+};
+
+static struct device_driver tcm_loop_driverfs = {
+	.name			= "tcm_loop",
+	.bus			= &tcm_loop_lld_bus,
+};
+/*
+ * Used with root_device_register() in tcm_loop_alloc_core_bus() below
+ */
+struct device *tcm_loop_primary;
+
+/*
+ * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
+ * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
+ */
+static int tcm_loop_change_queue_depth(
+	struct scsi_device *sdev,
+	int depth,
+	int reason)
+{
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		scsi_track_queue_full(sdev, depth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return sdev->queue_depth;
+}
+
+/*
+ * Locate the SAM Task Attr from struct scsi_cmnd *
+ */
+static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
+{
+	if (sc->device->tagged_supported) {
+		switch (sc->tag) {
+		case HEAD_OF_QUEUE_TAG:
+			return MSG_HEAD_TAG;
+		case ORDERED_QUEUE_TAG:
+			return MSG_ORDERED_TAG;
+		default:
+			break;
+		}
+	}
+
+	return MSG_SIMPLE_TAG;
+}
+
+static void tcm_loop_submission_work(struct work_struct *work)
+{
+	struct tcm_loop_cmd *tl_cmd =
+		container_of(work, struct tcm_loop_cmd, work);
+	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
+	struct scsi_cmnd *sc = tl_cmd->sc;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+	struct scatterlist *sgl_bidi = NULL;
+	u32 sgl_bidi_count = 0;
+	int ret;
+
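+	/*
+	 * Scsi_Host->hostdata holds a pointer to our tcm_loop_hba, stored
+	 * there by tcm_loop_driver_probe(); dereference it to recover the
+	 * emulated HBA context for this command.
+	 */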
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+
+	/*
+	 * Ensure that this tl_tpg reference from the incoming sc->device->id
+	 * has already been configured via tcm_loop_make_naa_tpg().
+	 */
+	if (!tl_tpg->tl_hba) {
+		set_host_byte(sc, DID_NO_CONNECT);
+		goto out_done;
+	}
+
+	tl_nexus = tl_hba->tl_nexus;
+	if (!tl_nexus) {
+		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+				" does not exist\n");
+		set_host_byte(sc, DID_ERROR);
+		goto out_done;
+	}
+
+	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
+			tl_nexus->se_sess,
+			scsi_bufflen(sc), sc->sc_data_direction,
+			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);
+
+	if (scsi_bidi_cmnd(sc)) {
+		struct scsi_data_buffer *sdb = scsi_in(sc);
+
+		sgl_bidi = sdb->table.sgl;
+		sgl_bidi_count = sdb->table.nents;
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+
+	}
+
+	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
+		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+		set_host_byte(sc, DID_NO_CONNECT);
+		goto out_done;
+	}
+
+	/*
+	 * Because some userspace code via scsi-generic does not memset its
+	 * associated read buffers, go ahead and do that here for type
+	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
+	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
+	 * by target core in transport_generic_allocate_tasks() ->
+	 * transport_generic_cmd_sequencer().
+	 */
+	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+	    se_cmd->data_direction == DMA_FROM_DEVICE) {
+		struct scatterlist *sg = scsi_sglist(sc);
+		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
+
+		if (buf != NULL) {
+			memset(buf, 0, sg->length);
+			kunmap(sg_page(sg));
+		}
+	}
+
+	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	if (ret == -ENOMEM) {
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	} else if (ret < 0) {
+		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+			tcm_loop_queue_status(se_cmd);
+		else
+			transport_send_check_condition_and_sense(se_cmd,
+					se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+
+	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
+	if (ret) {
+		transport_send_check_condition_and_sense(se_cmd,
+					se_cmd->scsi_sense_reason, 0);
+		transport_generic_free_cmd(se_cmd, 0);
+		return;
+	}
+	transport_handle_cdb_direct(se_cmd);
+	return;
+
+out_done:
+	sc->scsi_done(sc);
+	return;
+}
+
+/*
+ * ->queuecommand can be and usually is called from interrupt context, so
+ * defer the actual submission to a workqueue.
+ */
+static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
+{
+	struct tcm_loop_cmd *tl_cmd;
+
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+		" scsi_buf_len: %u\n", sc->device->host->host_no,
+		sc->device->id, sc->device->channel, sc->device->lun,
+		sc->cmnd[0], scsi_bufflen(sc));
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
+	if (!tl_cmd) {
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
+		set_host_byte(sc, DID_ERROR);
+		sc->scsi_done(sc);
+		return 0;
+	}
+
+	tl_cmd->sc = sc;
+	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
+	queue_work(tcm_loop_workqueue, &tl_cmd->work);
+	return 0;
+}
+
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+{
+	struct se_cmd *se_cmd = NULL;
+	struct se_portal_group *se_tpg;
+	struct se_session *se_sess;
+	struct tcm_loop_cmd *tl_cmd = NULL;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_tmr *tl_tmr = NULL;
+	struct tcm_loop_tpg *tl_tpg;
+	int ret = FAILED, rc;
+	/*
+	 * Locate the tcm_loop_hba_t pointer
+	 */
+	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+	/*
+	 * Locate the tl_nexus and se_sess pointers
+	 */
+	tl_nexus = tl_hba->tl_nexus;
+	if (!tl_nexus) {
+		pr_err("Unable to perform device reset without"
+				" active I_T Nexus\n");
+		return FAILED;
+	}
+	se_sess = tl_nexus->se_sess;
+	/*
+	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+	 */
+	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+	se_tpg = &tl_tpg->tl_se_tpg;
+
+	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+	if (!tl_cmd) {
+		pr_err("Unable to allocate memory for tl_cmd\n");
+		return FAILED;
+	}
+
+	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
+	if (!tl_tmr) {
+		pr_err("Unable to allocate memory for tl_tmr\n");
+		goto release;
+	}
+	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+
+	se_cmd = &tl_cmd->tl_se_cmd;
+	/*
+	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
+				DMA_NONE, MSG_SIMPLE_TAG,
+				&tl_cmd->tl_sense_buf[0]);
+
+	rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+	if (rc < 0)
+		goto release;
+	/*
+	 * Locate the underlying TCM struct se_lun from sc->device->lun
+	 */
+	if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
+		goto release;
+	/*
+	 * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
+	 * to wake us up.
+	 */
+	transport_generic_handle_tmr(se_cmd);
+	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
+	/*
+	 * The TMR LUN_RESET has completed, check the response status and
+	 * then release allocations.
+	 */
+	ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
+		SUCCESS : FAILED;
+release:
+	if (se_cmd)
+		transport_generic_free_cmd(se_cmd, 1);
+	else
+		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+	kfree(tl_tmr);
+	return ret;
+}
+
+static int tcm_loop_slave_alloc(struct scsi_device *sd)
+{
+	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
+	return 0;
+}
+
+static int tcm_loop_slave_configure(struct scsi_device *sd)
+{
+	return 0;
+}
+
+static struct scsi_host_template tcm_loop_driver_template = {
+	.proc_info		= tcm_loop_proc_info,
+	.proc_name		= "tcm_loopback",
+	.name			= "TCM_Loopback",
+	.queuecommand		= tcm_loop_queuecommand,
+	.change_queue_depth	= tcm_loop_change_queue_depth,
+	.eh_device_reset_handler = tcm_loop_device_reset,
+	.can_queue		= 1024,
+	.this_id		= -1,
+	.sg_tablesize		= 256,
+	.cmd_per_lun		= 1024,
+	.max_sectors		= 0xFFFF,
+	.use_clustering		= DISABLE_CLUSTERING,
+	.slave_alloc		= tcm_loop_slave_alloc,
+	.slave_configure	= tcm_loop_slave_configure,
+	.module			= THIS_MODULE,
+};
+
+static int tcm_loop_driver_probe(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	int error;
+
+	tl_hba = to_tcm_loop_hba(dev);
+
+	sh = scsi_host_alloc(&tcm_loop_driver_template,
+			sizeof(struct tcm_loop_hba));
+	if (!sh) {
+		pr_err("Unable to allocate struct scsi_host\n");
+		return -ENODEV;
+	}
+	tl_hba->sh = sh;
+
+	/*
+	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
+	 */
+	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
+	/*
+	 * Set up a single ID, Channel and LUN for now.
+	 */
+	sh->max_id = 2;
+	sh->max_lun = 0;
+	sh->max_channel = 0;
+	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+
+	error = scsi_add_host(sh, &tl_hba->dev);
+	if (error) {
+		pr_err("%s: scsi_add_host failed\n", __func__);
+		scsi_host_put(sh);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int tcm_loop_driver_remove(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+
+	tl_hba = to_tcm_loop_hba(dev);
+	sh = tl_hba->sh;
+
+	scsi_remove_host(sh);
+	scsi_host_put(sh);
+	return 0;
+}
+
+static void tcm_loop_release_adapter(struct device *dev)
+{
+	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
+
+	kfree(tl_hba);
+}
+
+/*
+ * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
+ */
+static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
+{
+	int ret;
+
+	tl_hba->dev.bus = &tcm_loop_lld_bus;
+	tl_hba->dev.parent = tcm_loop_primary;
+	tl_hba->dev.release = &tcm_loop_release_adapter;
+	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
+
+	ret = device_register(&tl_hba->dev);
+	if (ret) {
+		pr_err("device_register() failed for"
+				" tl_hba->dev: %d\n", ret);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * Called from tcm_loop_fabric_init() in tcm_loop.c to load the emulated
+ * tcm_loop SCSI bus.
+ */
+static int tcm_loop_alloc_core_bus(void)
+{
+	int ret;
+
+	tcm_loop_primary = root_device_register("tcm_loop_0");
+	if (IS_ERR(tcm_loop_primary)) {
+		pr_err("Unable to allocate tcm_loop_primary\n");
+		return PTR_ERR(tcm_loop_primary);
+	}
+
+	ret = bus_register(&tcm_loop_lld_bus);
+	if (ret) {
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
+		goto dev_unreg;
+	}
+
+	ret = driver_register(&tcm_loop_driverfs);
+	if (ret) {
+		pr_err("driver_register() failed for"
+				" tcm_loop_driverfs\n");
+		goto bus_unreg;
+	}
+
+	pr_debug("Initialized TCM Loop Core Bus\n");
+	return ret;
+
+bus_unreg:
+	bus_unregister(&tcm_loop_lld_bus);
+dev_unreg:
+	root_device_unregister(tcm_loop_primary);
+	return ret;
+}
+
+static void tcm_loop_release_core_bus(void)
+{
+	driver_unregister(&tcm_loop_driverfs);
+	bus_unregister(&tcm_loop_lld_bus);
+	root_device_unregister(tcm_loop_primary);
+
+	pr_debug("Releasing TCM Loop Core BUS\n");
+}
+
+static char *tcm_loop_get_fabric_name(void)
+{
+	return "loopback";
+}
+
+static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	/*
+	 * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
+	 * time based on the protocol dependent prefix of the passed configfs group.
+	 *
+	 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
+	 * ProtocolID using target_core_fabric_lib.c symbols.
+	 */
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_fabric_proto_ident(se_tpg);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_fabric_proto_ident(se_tpg);
+	default:
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	/*
+	 * Return the passed NAA identifier for the SAS Target Port
+	 */
+	return &tl_tpg->tl_hba->tl_wwn_address[0];
+}
+
+static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	/*
+	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
+	 * to represent the SCSI Target Port.
+	 */
+	return tl_tpg->tl_tpgt;
+}
+
+static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_loop_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+	default:
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+			format_code, buf);
+}
+
+static u32 tcm_loop_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_FCP:
+		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+	default:
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+			format_code);
+}
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+static char *tcm_loop_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_FCP:
+		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	case SCSI_PROTOCOL_ISCSI:
+		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	default:
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
+			" SAS emulation\n", tl_hba->tl_proto_id);
+		break;
+	}
+
+	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+			port_nexus_ptr);
+}
+
+/*
+ * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
+ * based upon the incoming fabric dependent SCSI Initiator Port
+ */
+static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
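+	/* Returning 0: do not cache dynamically generated demo-mode node ACLs */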
+	return 0;
+}
+
+/*
+ * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
+ * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
+ */
+static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+/*
+ * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
+ * never be called for TCM_Loop by target_core_fabric_configfs.c code.
+ * It has been added here as a nop for target_fabric_tf_ops_check()
+ */
+static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
+	struct se_portal_group *se_tpg)
+{
+	struct tcm_loop_nacl *tl_nacl;
+
+	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
+	if (!tl_nacl) {
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
+		return NULL;
+	}
+
+	return &tl_nacl->se_node_acl;
+}
+
+static void tcm_loop_tpg_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
+				struct tcm_loop_nacl, se_node_acl);
+
+	kfree(tl_nacl);
+}
+
+static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
+{
+	return 1;
+}
+
+static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+	return;
+}
+
+static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
+{
+	return 1;
+}
+
+static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+			struct tcm_loop_cmd, tl_se_cmd);
+
+	return tl_cmd->sc_cmd_state;
+}
+
+static int tcm_loop_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void tcm_loop_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static int tcm_loop_write_pending(struct se_cmd *se_cmd)
+{
+	/*
+	 * Linux/SCSI has already sent down a struct scsi_cmnd with
+	 * sc->sc_data_direction of DMA_TO_DEVICE and struct scatterlist array
+	 * memory, which has already been mapped to struct se_cmd->t_mem_list
+	 * format with transport_generic_map_mem_to_cmd().
+	 *
+	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
+	 * object execution queue.
+	 */
+	transport_generic_process_write(se_cmd);
+	return 0;
+}
+
+static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	sc->result = SAM_STAT_GOOD;
+	set_host_byte(sc, DID_OK);
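+	/* Report residual bytes for over/underflow back to the SCSI midlayer */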
+	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+		scsi_set_resid(sc, se_cmd->residual_count);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+	struct scsi_cmnd *sc = tl_cmd->sc;
+
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
+			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
+
+	if (se_cmd->sense_buffer &&
+	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+
+		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
+				SCSI_SENSE_BUFFERSIZE);
+		sc->result = SAM_STAT_CHECK_CONDITION;
+		set_driver_byte(sc, DRIVER_SENSE);
+	} else
+		sc->result = se_cmd->scsi_status;
+
+	set_host_byte(sc, DID_OK);
+	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
+	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
+		scsi_set_resid(sc, se_cmd->residual_count);
+	sc->scsi_done(sc);
+	return 0;
+}
+
+static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
+	/*
+	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
+	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
+	 */
+	atomic_set(&tl_tmr->tmr_complete, 1);
+	wake_up(&tl_tmr->tl_tmr_wait);
+	return 0;
+}
+
+static u16 tcm_loop_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	return 0;
+}
+
+static u16 tcm_loop_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
+{
+	switch (tl_hba->tl_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+		return "SAS";
+	case SCSI_PROTOCOL_FCP:
+		return "FCP";
+	case SCSI_PROTOCOL_ISCSI:
+		return "iSCSI";
+	default:
+		break;
+	}
+
+	return "Unknown";
+}
+
+/* Start items for tcm_loop_port_cit */
+
+static int tcm_loop_port_link(
+	struct se_portal_group *se_tpg,
+	struct se_lun *lun)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+	atomic_inc(&tl_tpg->tl_tpg_port_count);
+	smp_mb__after_atomic_inc();
+	/*
+	 * Add Linux/SCSI struct scsi_device by HCTL
+	 */
+	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
+
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
+	return 0;
+}
+
+static void tcm_loop_port_unlink(
+	struct se_portal_group *se_tpg,
+	struct se_lun *se_lun)
+{
+	struct scsi_device *sd;
+	struct tcm_loop_hba *tl_hba;
+	struct tcm_loop_tpg *tl_tpg;
+
+	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
+	tl_hba = tl_tpg->tl_hba;
+
+	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
+				se_lun->unpacked_lun);
+	if (!sd) {
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
+			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+		return;
+	}
+	/*
+	 * Remove Linux/SCSI struct scsi_device by HCTL
+	 */
+	scsi_remove_device(sd);
+	scsi_device_put(sd);
+
+	atomic_dec(&tl_tpg->tl_tpg_port_count);
+	smp_mb__after_atomic_dec();
+
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
+}
+
+/* End items for tcm_loop_port_cit */
+
+/* Start items for tcm_loop_nexus_cit */
+
+static int tcm_loop_make_nexus(
+	struct tcm_loop_tpg *tl_tpg,
+	const char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	struct tcm_loop_nexus *tl_nexus;
+	int ret = -ENOMEM;
+
+	if (tl_tpg->tl_hba->tl_nexus) {
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+		return -EEXIST;
+	}
+	se_tpg = &tl_tpg->tl_se_tpg;
+
+	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
+	if (!tl_nexus) {
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Initialize the struct se_session pointer
+	 */
+	tl_nexus->se_sess = transport_init_session();
+	if (IS_ERR(tl_nexus->se_sess)) {
+		ret = PTR_ERR(tl_nexus->se_sess);
+		goto out;
+	}
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
+	 * Initiator port name of the passed configfs group 'name'.
+	 */
+	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+				se_tpg, (unsigned char *)name);
+	if (!tl_nexus->se_sess->se_node_acl) {
+		transport_free_session(tl_nexus->se_sess);
+		goto out;
+	}
+	/*
+	 * Now, register the SAS I_T Nexus as active with the call to
+	 * transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+			tl_nexus->se_sess, tl_nexus);
+	tl_tpg->tl_hba->tl_nexus = tl_nexus;
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		name);
+	return 0;
+
+out:
+	kfree(tl_nexus);
+	return ret;
+}
+
+static int tcm_loop_drop_nexus(
+	struct tcm_loop_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_loop_nexus *tl_nexus;
+	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+
+	tl_nexus = tpg->tl_hba->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	se_sess = tl_nexus->se_sess;
+	if (!se_sess)
+		return -ENODEV;
+
+	if (atomic_read(&tpg->tl_tpg_port_count)) {
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
+			" active TPG port count: %d\n",
+			atomic_read(&tpg->tl_tpg_port_count));
+		return -EPERM;
+	}
+
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
+	 */
+	transport_deregister_session(tl_nexus->se_sess);
+	tpg->tl_hba->tl_nexus = NULL;
+	kfree(tl_nexus);
+	return 0;
+}
+
+/* End items for tcm_loop_nexus_cit */
+
+static ssize_t tcm_loop_tpg_show_nexus(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_nexus *tl_nexus;
+	ssize_t ret;
+
+	tl_nexus = tl_tpg->tl_hba->tl_nexus;
+	if (!tl_nexus)
+		return -ENODEV;
+
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+		tl_nexus->se_sess->se_node_acl->initiatorname);
+
+	return ret;
+}
+
+static ssize_t tcm_loop_tpg_store_nexus(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+			struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
+	int ret;
+	/*
+	 * Shut down the active I_T nexus if 'NULL' is passed.
+	 */
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_loop_drop_nexus(tl_tpg);
+		return (!ret) ? count : ret;
+	}
+	/*
+	 * Otherwise make sure the passed virtual Initiator port WWN matches
+	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
+	 * tcm_loop_make_nexus()
+	 */
+	if (strlen(page) >= TL_WWN_ADDR_LEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds
+				" max: %d\n", page, TL_WWN_ADDR_LEN);
+		return -EINVAL;
+	}
+	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
+			pr_err("Passed SAS Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "fc.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
+			pr_err("Passed FCP Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[3]; /* Skip over "fc." */
+		goto check_newline;
+	}
+	ptr = strstr(i_port, "iqn.");
+	if (ptr) {
+		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
+			pr_err("Passed iSCSI Initiator Port %s does not"
+				" match target port protoid: %s\n", i_port,
+				tcm_loop_dump_proto_id(tl_hba));
+			return -EINVAL;
+		}
+		port_ptr = &i_port[0];
+		goto check_newline;
+	}
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
+			" %s\n", i_port);
+	return -EINVAL;
+	/*
+	 * Clear any trailing newline for the NAA WWN
+	 */
+check_newline:
+	if (i_port[strlen(i_port)-1] == '\n')
+		i_port[strlen(i_port)-1] = '\0';
+
+	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
+	&tcm_loop_tpg_nexus.attr,
+	NULL,
+};
+
+/* Start items for tcm_loop_naa_cit */
+
+struct se_portal_group *tcm_loop_make_naa_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+			struct tcm_loop_hba, tl_hba_wwn);
+	struct tcm_loop_tpg *tl_tpg;
+	char *tpgt_str, *end_ptr;
+	int ret;
+	unsigned short int tpgt;
+
+	tpgt_str = strstr(name, "tpgt_");
+	if (!tpgt_str) {
+		pr_err("Unable to locate \"tpgt_#\" directory"
+				" group\n");
+		return ERR_PTR(-EINVAL);
+	}
+	tpgt_str += 5; /* Skip ahead of "tpgt_" */
+	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+	if (tpgt >= TL_TPGS_PER_HBA) {
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+				" %u\n", tpgt, TL_TPGS_PER_HBA);
+		return ERR_PTR(-EINVAL);
+	}
+	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
+	tl_tpg->tl_hba = tl_hba;
+	tl_tpg->tl_tpgt = tpgt;
+	/*
+	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
+	 */
+	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
+			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
+			TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0)
+		return ERR_PTR(-ENOMEM);
+
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
+		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+
+	return &tl_tpg->tl_se_tpg;
+}
+
+void tcm_loop_drop_naa_tpg(
+	struct se_portal_group *se_tpg)
+{
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+				struct tcm_loop_tpg, tl_se_tpg);
+	struct tcm_loop_hba *tl_hba;
+	unsigned short tpgt;
+
+	tl_hba = tl_tpg->tl_hba;
+	tpgt = tl_tpg->tl_tpgt;
+	/*
+	 * Release the I_T Nexus for the Virtual SAS link if present
+	 */
+	tcm_loop_drop_nexus(tl_tpg);
+	/*
+	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
+	 */
+	core_tpg_deregister(se_tpg);
+
+	tl_tpg->tl_hba = NULL;
+	tl_tpg->tl_tpgt = 0;
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
+		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+		config_item_name(&wwn->wwn_group.cg_item), tpgt);
+}
+
+/* End items for tcm_loop_naa_cit */
+
+/* Start items for tcm_loop_cit */
+
+struct se_wwn *tcm_loop_make_scsi_hba(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct tcm_loop_hba *tl_hba;
+	struct Scsi_Host *sh;
+	char *ptr;
+	int ret, off = 0;
+
+	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
+	if (!tl_hba) {
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	/*
+	 * Determine the emulated Protocol Identifier and Target Port Name
+	 * based on the incoming configfs directory name.
+	 */
+	ptr = strstr(name, "naa.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
+		goto check_len;
+	}
+	ptr = strstr(name, "fc.");
+	if (ptr) {
+		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
+		off = 3; /* Skip over "fc." */
+		goto check_len;
+	}
+	ptr = strstr(name, "iqn.");
+	if (!ptr) {
+		pr_err("Unable to locate prefix for emulated Target "
+				"Port: %s\n", name);
+		ret = -EINVAL;
+		goto out;
+	}
+	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
+
+check_len:
+	if (strlen(name) >= TL_WWN_ADDR_LEN) {
+		pr_err("Emulated NAA %s Address: %s, exceeds"
+			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
+			TL_WWN_ADDR_LEN);
+		ret = -EINVAL;
+		goto out;
+	}
+	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
+
+	/*
+	 * Call device_register(tl_hba->dev) to register the emulated
+	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
+	 * device_register() callbacks in tcm_loop_driver_probe()
+	 */
+	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
+	if (ret)
+		goto out;
+
+	sh = tl_hba->sh;
+	tcm_loop_hba_no_cnt++;
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
+		" %s Address: %s at Linux/SCSI Host ID: %d\n",
+		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
+
+	return &tl_hba->tl_hba_wwn;
+out:
+	kfree(tl_hba);
+	return ERR_PTR(ret);
+}
+
+void tcm_loop_drop_scsi_hba(
+	struct se_wwn *wwn)
+{
+	struct tcm_loop_hba *tl_hba = container_of(wwn,
+				struct tcm_loop_hba, tl_hba_wwn);
+
+	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
+		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
+	/*
+	 * Call device_unregister() on the original tl_hba->dev.
+	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
+	 * release *tl_hba;
+	 */
+	device_unregister(&tl_hba->dev);
+}
+
+/* Start items for tcm_loop_cit */
+static ssize_t tcm_loop_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
+}
+
+TF_WWN_ATTR_RO(tcm_loop, version);
+
+static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
+	&tcm_loop_wwn_version.attr,
+	NULL,
+};
+
+/* End items for tcm_loop_cit */
+
+static int tcm_loop_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+	/*
+	 * Set the TCM Loop HBA counter to zero
+	 */
+	tcm_loop_hba_no_cnt = 0;
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
+	if (IS_ERR(fabric)) {
+		pr_err("tcm_loop_register_configfs() failed!\n");
+		return PTR_ERR(fabric);
+	}
+	/*
+	 * Setup the fabric API of function pointers used by target_core_mod
+	 */
+	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
+	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
+	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
+	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
+	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
+	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
+	fabric->tf_ops.tpg_get_pr_transport_id_len =
+					&tcm_loop_get_pr_transport_id_len;
+	fabric->tf_ops.tpg_parse_pr_out_transport_id =
+					&tcm_loop_parse_pr_out_transport_id;
+	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
+	fabric->tf_ops.tpg_check_demo_mode_cache =
+					&tcm_loop_check_demo_mode_cache;
+	fabric->tf_ops.tpg_check_demo_mode_write_protect =
+					&tcm_loop_check_demo_mode_write_protect;
+	fabric->tf_ops.tpg_check_prod_mode_write_protect =
+					&tcm_loop_check_prod_mode_write_protect;
+	/*
+	 * The TCM loopback fabric module runs in demo-mode to a local
+	 * virtual SCSI device, so fabric dependent initiator ACLs are
+	 * not required.
+	 */
+	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
+	fabric->tf_ops.tpg_release_fabric_acl =
+					&tcm_loop_tpg_release_fabric_acl;
+	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
+	/*
+	 * Used for setting up remaining TCM resources in process context
+	 */
+	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
+	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
+	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
+	fabric->tf_ops.close_session = &tcm_loop_close_session;
+	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
+	fabric->tf_ops.sess_get_initiator_sid = NULL;
+	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
+	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
+	/*
+	 * Not used for TCM loopback
+	 */
+	fabric->tf_ops.set_default_node_attributes =
+					&tcm_loop_set_default_node_attributes;
+	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
+	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
+	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
+	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
+	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+	fabric->tf_ops.set_fabric_sense_len = &tcm_loop_set_fabric_sense_len;
+	fabric->tf_ops.get_fabric_sense_len = &tcm_loop_get_fabric_sense_len;
+
+	/*
+	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
+	 */
+	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
+	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
+	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
+	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
+	/*
+	 * fabric_post_link() and fabric_pre_unlink() are used for
+	 * registration and release of TCM Loop Virtual SCSI LUNs.
+	 */
+	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
+	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
+	fabric->tf_ops.fabric_make_np = NULL;
+	fabric->tf_ops.fabric_drop_np = NULL;
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	/*
+	 * Once fabric->tf_ops has been setup, now register the fabric for
+	 * use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_err("target_fabric_configfs_register() for"
+				" TCM_Loop failed!\n");
+		target_fabric_configfs_free(fabric);
+		return -1;
+	}
+	/*
+	 * Setup our local pointer to *fabric.
+	 */
+	tcm_loop_fabric_configfs = fabric;
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
+			" tcm_loop_fabric_configfs\n");
+	return 0;
+}
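+
+/*
+ * Editorial sketch (not part of the original submission): once this fabric
+ * module is registered, userspace drives it through the generic target
+ * configfs tree.  Assuming the standard configfs mount point, a minimal
+ * loopback LUN setup looks roughly like:
+ *
+ *	mount -t configfs none /sys/kernel/config
+ *	mkdir -p /sys/kernel/config/target/loopback/naa.600140512345678/tpgt_1/lun/lun_0
+ *	ln -s /sys/kernel/config/target/core/iblock_0/my_dev \
+ *	      /sys/kernel/config/target/loopback/naa.600140512345678/tpgt_1/lun/lun_0/my_port
+ *
+ * The mkdir calls land in tcm_loop_make_scsi_hba()/tcm_loop_make_naa_tpg()
+ * above, and the symlink into the lun_0 group ends up in tcm_loop_port_link().
+ * The NAA WWN, backstore name and link name here are illustrative only.
+ */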
+
+static void tcm_loop_deregister_configfs(void)
+{
+	if (!tcm_loop_fabric_configfs)
+		return;
+
+	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
+	tcm_loop_fabric_configfs = NULL;
+	pr_debug("TCM_LOOP[0] - Cleared"
+				" tcm_loop_fabric_configfs\n");
+}
+
+static int __init tcm_loop_fabric_init(void)
+{
+	int ret = -ENOMEM;
+
+	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
+	if (!tcm_loop_workqueue)
+		goto out;
+
+	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
+				sizeof(struct tcm_loop_cmd),
+				__alignof__(struct tcm_loop_cmd),
+				0, NULL);
+	if (!tcm_loop_cmd_cache) {
+		pr_debug("kmem_cache_create() for"
+			" tcm_loop_cmd_cache failed\n");
+		goto out_destroy_workqueue;
+	}
+
+	ret = tcm_loop_alloc_core_bus();
+	if (ret)
+		goto out_destroy_cache;
+
+	ret = tcm_loop_register_configfs();
+	if (ret)
+		goto out_release_core_bus;
+
+	return 0;
+
+out_release_core_bus:
+	tcm_loop_release_core_bus();
+out_destroy_cache:
+	kmem_cache_destroy(tcm_loop_cmd_cache);
+out_destroy_workqueue:
+	destroy_workqueue(tcm_loop_workqueue);
+out:
+	return ret;
+}
+
+static void __exit tcm_loop_fabric_exit(void)
+{
+	tcm_loop_deregister_configfs();
+	tcm_loop_release_core_bus();
+	kmem_cache_destroy(tcm_loop_cmd_cache);
+	destroy_workqueue(tcm_loop_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
+MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
+MODULE_LICENSE("GPL");
+module_init(tcm_loop_fabric_init);
+module_exit(tcm_loop_fabric_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.h b/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.h
new file mode 100644
index 0000000..7b54893
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/loopback/tcm_loop.h
@@ -0,0 +1,62 @@
+#define TCM_LOOP_VERSION		"v2.1-rc2"
+#define TL_WWN_ADDR_LEN			256
+#define TL_TPGS_PER_HBA			32
+
+/*
+ * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
+ */
+#define TL_SCSI_MAX_CMD_LEN		32
+
+struct tcm_loop_cmd {
+	/* State of Linux/SCSI CDB+Data descriptor */
+	u32 sc_cmd_state;
+	/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
+	struct scsi_cmnd *sc;
+	/* The TCM I/O descriptor that is accessed via container_of() */
+	struct se_cmd tl_se_cmd;
+	struct work_struct work;
+	/* Sense buffer that will be mapped into outgoing status */
+	unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
+};
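+
+/*
+ * Illustrative note (added commentary, not in the original source): the
+ * embedded tl_se_cmd is what gets handed to TCM core, and fabric callbacks
+ * that receive a struct se_cmd recover the owning descriptor with
+ * container_of(), e.g.:
+ *
+ *	struct tcm_loop_cmd *tl_cmd =
+ *		container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd);
+ */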
+
+struct tcm_loop_tmr {
+	atomic_t tmr_complete;
+	wait_queue_head_t tl_tmr_wait;
+};
+
+struct tcm_loop_nexus {
+	int it_nexus_active;
+	/*
+	 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
+	 */
+	struct scsi_host *sh;
+	/*
+	 * Pointer to TCM session for I_T Nexus
+	 */
+	struct se_session *se_sess;
+};
+
+struct tcm_loop_nacl {
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_loop_tpg {
+	unsigned short tl_tpgt;
+	atomic_t tl_tpg_port_count;
+	struct se_portal_group tl_se_tpg;
+	struct tcm_loop_hba *tl_hba;
+};
+
+struct tcm_loop_hba {
+	u8 tl_proto_id;
+	unsigned char tl_wwn_address[TL_WWN_ADDR_LEN];
+	struct se_hba_s *se_hba;
+	struct se_lun *tl_hba_lun;
+	struct se_port *tl_hba_lun_sep;
+	struct se_device_s *se_dev_hba_ptr;
+	struct tcm_loop_nexus *tl_nexus;
+	struct device dev;
+	struct Scsi_Host *sh;
+	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+	struct se_wwn tl_hba_wwn;
+};
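+
+/*
+ * Note (editorial): tl_hba_wwn and dev are embedded rather than pointed to so
+ * that configfs and driver-core callbacks can recover the HBA with
+ * container_of(), e.g.:
+ *
+ *	struct tcm_loop_hba *tl_hba =
+ *		container_of(wwn, struct tcm_loop_hba, tl_hba_wwn);
+ */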
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.c
new file mode 100644
index 0000000..b10f585
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.c
@@ -0,0 +1,2052 @@
+/*******************************************************************************
+ * Filename:  target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <linux/export.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+		struct se_port *port, int explict, int offline);
+
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
+ */
+int target_emulate_report_target_port_groups(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+				    Target port group descriptor */
+	/*
+	 * Need at least 4 bytes of response data or else we can't
+	 * even fit the return data length.
+	 */
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
+			" too small\n", cmd->data_length);
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		/*
+		 * Check if the Target port group and Target port descriptor list
+		 * based on tg_pt_gp_members count will fit into the response payload.
+		 * Otherwise, bump rd_len to let the initiator know we have exceeded
+		 * the allocation length and the response is truncated.
+		 */
+		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
+		     cmd->data_length) {
+			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
+			continue;
+		}
+		/*
+		 * PREF: Preferred target port bit, determine if this
+		 * bit should be set for port group.
+		 */
+		if (tg_pt_gp->tg_pt_gp_pref)
+			buf[off] = 0x80;
+		/*
+		 * Set the ASYMMETRIC ACCESS State
+		 */
+		buf[off++] |= (atomic_read(
+			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		/*
+		 * Set supported ASYMMETRIC ACCESS State bits
+		 */
+		buf[off] = 0x80; /* T_SUP */
+		buf[off] |= 0x40; /* O_SUP */
+		buf[off] |= 0x8; /* U_SUP */
+		buf[off] |= 0x4; /* S_SUP */
+		buf[off] |= 0x2; /* AN_SUP */
+		buf[off++] |= 0x1; /* AO_SUP */
+		/*
+		 * TARGET PORT GROUP
+		 */
+		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+		off++; /* Skip over Reserved */
+		/*
+		 * STATUS CODE
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+		/*
+		 * Vendor Specific field
+		 */
+		buf[off++] = 0x00;
+		/*
+		 * TARGET PORT COUNT
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+		rd_len += 8;
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+			port = tg_pt_gp_mem->tg_pt;
+			/*
+			 * Start Target Port descriptor format
+			 *
+			 * See spc4r17 section 6.2.7 Table 247
+			 */
+			off += 2; /* Skip over Obsolete */
+			/*
+			 * Set RELATIVE TARGET PORT IDENTIFIER
+			 */
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+			rd_len += 4;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	/*
+	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
+	 */
+	buf[0] = ((rd_len >> 24) & 0xff);
+	buf[1] = ((rd_len >> 16) & 0xff);
+	buf[2] = ((rd_len >> 8) & 0xff);
+	buf[3] = (rd_len & 0xff);
+
+	transport_kunmap_data_sg(cmd);
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
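+
+/*
+ * Layout sketch for the parameter data built above (added commentary; see
+ * spc4r17 section 6.27 for the normative definition):
+ *
+ *	Bytes 0..3  RETURN DATA LENGTH (rd_len, excludes these four bytes)
+ *	Then one target port group descriptor per group:
+ *	Byte 0      PREF bit + ASYMMETRIC ACCESS STATE
+ *	Byte 1      supported access state bits (T/O/U/S/AN/AO_SUP)
+ *	Bytes 2..3  TARGET PORT GROUP identifier
+ *	Byte 4      Reserved
+ *	Byte 5      STATUS CODE
+ *	Byte 6      Vendor specific
+ *	Byte 7      TARGET PORT COUNT
+ *	followed by one 4-byte target port descriptor per member
+ *	(2 obsolete bytes + RELATIVE TARGET PORT IDENTIFIER).
+ */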
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
+ */
+int target_emulate_set_target_port_groups(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+	unsigned char *buf;
+	unsigned char *ptr;
+	u32 len = 4; /* Skip over RESERVED area in header */
+	int alua_access_state, primary = 0, rc;
+	u16 tg_pt_id, rtpi;
+
+	if (!l_port) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	buf = transport_kmap_data_sg(cmd);
+
+	/*
+	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+	 * for the local tg_pt_gp.
+	 */
+	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+	if (!l_tg_pt_gp_mem) {
+		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
+		goto out;
+	}
+	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+	if (!l_tg_pt_gp) {
+		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
+		goto out;
+	}
+	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	if (!rc) {
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
+				" while TPGS_EXPLICT_ALUA is disabled\n");
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		rc = -EINVAL;
+		goto out;
+	}
+
+	ptr = &buf[4]; /* Skip over RESERVED area in header */
+
+	while (len < cmd->data_length) {
+		alua_access_state = (ptr[0] & 0x0f);
+		/*
+		 * Check the received ALUA access state, and determine if
+		 * the state is a primary or secondary target port asymmetric
+		 * access state.
+		 */
+		rc = core_alua_check_transition(alua_access_state, &primary);
+		if (rc != 0) {
+			/*
+			 * If the SET TARGET PORT GROUPS attempts to establish
+			 * an invalid combination of target port asymmetric
+			 * access states or attempts to establish an
+			 * unsupported target port asymmetric access state,
+			 * then the command shall be terminated with CHECK
+			 * CONDITION status, with the sense key set to ILLEGAL
+			 * REQUEST, and the additional sense code set to INVALID
+			 * FIELD IN PARAMETER LIST.
+			 */
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			rc = -EINVAL;
+			goto out;
+		}
+		rc = -1;
+		/*
+		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
+		 * specifies a primary target port asymmetric access state,
+		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
+		 * a primary target port group for which the primary target
+		 * port asymmetric access state shall be changed. If the
+		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
+		 * port asymmetric access state, then the TARGET PORT GROUP OR
+		 * TARGET PORT field specifies the relative target port
+		 * identifier (see 3.1.120) of the target port for which the
+		 * secondary target port asymmetric access state shall be
+		 * changed.
+		 */
+		if (primary) {
+			tg_pt_id = get_unaligned_be16(ptr + 2);
+			/*
+			 * Locate the matching target port group ID from
+			 * the global tg_pt_gp list
+			 */
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			list_for_each_entry(tg_pt_gp,
+					&su_dev->t10_alua.tg_pt_gps_list,
+					tg_pt_gp_list) {
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
+					continue;
+
+				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+					continue;
+
+				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_inc();
+				spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+
+				rc = core_alua_do_port_transition(tg_pt_gp,
+						dev, l_port, nacl,
+						alua_access_state, 1);
+
+				spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_dec();
+				break;
+			}
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			/*
+			 * If no matching target port group ID can be located,
+			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0) {
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				rc = -EINVAL;
+				goto out;
+			}
+		} else {
+			/*
+			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+			 * the Target Port in question for the incoming
+			 * SET_TARGET_PORT_GROUPS op.
+			 */
+			rtpi = get_unaligned_be16(ptr + 2);
+			/*
+			 * Locate the matching relative target port identifier
+			 * for the struct se_device storage object.
+			 */
+			spin_lock(&dev->se_port_lock);
+			list_for_each_entry(port, &dev->dev_sep_list,
+							sep_list) {
+				if (port->sep_rtpi != rtpi)
+					continue;
+
+				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+				spin_unlock(&dev->se_port_lock);
+
+				rc = core_alua_set_tg_pt_secondary_state(
+						tg_pt_gp_mem, port, 1, 1);
+
+				spin_lock(&dev->se_port_lock);
+				break;
+			}
+			spin_unlock(&dev->se_port_lock);
+			/*
+			 * If no matching relative target port identifier can
+			 * be located, throw an exception with ASCQ:
+			 * INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0) {
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				rc = -EINVAL;
+				goto out;
+			}
+		}
+
+		ptr += 4;
+		len += 4;
+	}
+
+out:
+	transport_kunmap_data_sg(cmd);
+	if (!rc) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return rc;
+}
+
+static inline int core_alua_state_nonoptimized(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	int nonop_delay_msecs,
+	u8 *alua_ascq)
+{
+	/*
+	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+	 * later to determine if processing of this cmd needs to be
+	 * temporarily delayed for the Active/NonOptimized primary access state.
+	 */
+	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+	cmd->alua_nonop_delay = nonop_delay_msecs;
+	return 0;
+}
+
+static inline int core_alua_state_standby(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+	 * spc4r17 section 5.9.2.4.4
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case LOG_SELECT:
+	case LOG_SENSE:
+	case MODE_SELECT:
+	case MODE_SENSE:
+	case REPORT_LUNS:
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		return 0;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_unavailable(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+	 * spc4r17 section 5.9.2.4.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+		return 0;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_transition(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+	 * spc4r17 section 5.9.2.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+		return 0;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer().  This function is assigned to
+ * struct t10_alua *->state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and invalid cdb field
+ */
+static int core_alua_state_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	struct se_lun *lun = cmd->se_lun;
+	struct se_port *port = lun->lun_sep;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	int out_alua_state, nonop_delay_msecs;
+
+	if (!port)
+		return 0;
+	/*
+	 * First, check for a struct se_port specific secondary ALUA target port
+	 * access state: OFFLINE
+	 */
+	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+		pr_debug("ALUA: Got secondary offline status for local"
+				" target port\n");
+		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+		return 1;
+	}
+	/*
+	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+	 * ALUA target port group, to obtain current ALUA access state.
+	 * Otherwise look for the underlying struct se_device association with
+	 * a ALUA logical unit group.
+	 */
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+	 * statement so the compiler knows explicitly to check this case first.
+	 * For the Optimized ALUA access state case, we want to process the
+	 * incoming fabric cmd ASAP..
+	 */
+	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+		return 0;
+
+	switch (out_alua_state) {
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return core_alua_state_nonoptimized(cmd, cdb,
+					nonop_delay_msecs, alua_ascq);
+	case ALUA_ACCESS_STATE_STANDBY:
+		return core_alua_state_standby(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return core_alua_state_transition(cmd, cdb, alua_ascq);
+	/*
+	 * OFFLINE is a secondary ALUA target port group access state, that is
+	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+	 */
+	case ALUA_ACCESS_STATE_OFFLINE:
+	default:
+		pr_err("Unknown ALUA access state: 0x%02x\n",
+				out_alua_state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change requests.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+	case ALUA_ACCESS_STATE_STANDBY:
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		/*
+		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+		 * defined as primary target port asymmetric access states.
+		 */
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_OFFLINE:
+		/*
+		 * OFFLINE state is defined as a secondary target port
+		 * asymmetric access state.
+		 */
+		*primary = 0;
+		break;
+	default:
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+		return "Active/Optimized";
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_STANDBY:
+		return "Standby";
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return "Unavailable";
+	case ALUA_ACCESS_STATE_OFFLINE:
+		return "Offline";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+	switch (status) {
+	case ALUA_STATUS_NONE:
+		return "None";
+	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+		return "Altered by Explict STPG";
+	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+		return "Altered by Implict ALUA";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths..
+ */
+int core_alua_check_nonop_delay(
+	struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+		return 0;
+	if (in_interrupt())
+		return 0;
+	/*
+	 * The ALUA Active/NonOptimized access state delay can be disabled
+	 * via configfs with a value of zero
+	 */
+	if (!cmd->alua_nonop_delay)
+		return 0;
+	/*
+	 * struct se_cmd->alua_nonop_delay gets set by a target port group
+	 * defined interval in core_alua_state_nonoptimized()
+	 */
+	msleep_interruptible(cmd->alua_nonop_delay);
+	return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
+ */
+static int core_alua_write_tpg_metadata(
+	const char *path,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	mm_segment_t old_fs;
+	struct file *file;
+	struct iovec iov[1];
+	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		pr_err("filp_open(%s) for ALUA metadata failed\n",
+			path);
+		return -ENODEV;
+	}
+
+	iov[0].iov_base = &md_buf[0];
+	iov[0].iov_len = md_buf_len;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		pr_err("Error writing ALUA metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -EIO;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ */
+static int core_alua_update_tpg_primary_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int primary_state,
+	unsigned char *md_buf)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	char path[ALUA_METADATA_PATH_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+			"tg_pt_gp_id=%hu\n"
+			"alua_access_state=0x%02x\n"
+			"alua_access_status=0x%02x\n",
+			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_alua_access_status);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN,
+		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
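+
+/*
+ * Example of the primary metadata written above (values illustrative), stored
+ * at /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp config item name>:
+ *
+ *	tg_pt_gp_id=0
+ *	alua_access_state=0x00
+ *	alua_access_status=0x02
+ */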
+
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	struct se_port *l_port,
+	struct se_node_acl *nacl,
+	unsigned char *md_buf,
+	int new_state,
+	int explict)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun_acl *lacl;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp_member *mem;
+	int old_state = 0;
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+		port = mem->tg_pt;
+		/*
+		 * After an implicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition for the initiator port associated with every I_T
+		 * nexus with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED.
+		 *
+		 * After an explicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED for the initiator port associated with
+		 * every I_T nexus other than the I_T nexus on which the SET
+		 * TARGET PORT GROUPS command was received.
+		 */
+		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(se_deve, &port->sep_alua_list,
+					alua_port_list) {
+			lacl = se_deve->se_lun_acl;
+			/*
+			 * se_deve->se_lun_acl pointer may be NULL for an
+			 * entry created without explicit Node+MappedLUN ACLs
+			 */
+			if (!lacl)
+				continue;
+
+			if (explict &&
+			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+			   (l_port != NULL) && (l_port == port))
+				continue;
+
+			core_scsi3_ua_allocate(lacl->se_lun_nacl,
+				se_deve->mapped_lun, 0x2A,
+				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition(), this metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp,
+					new_state, md_buf);
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+	}
+	/*
+	 * Set the current primary ALUA access state to the requested new state
+	 */
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+		core_alua_dump_state(new_state));
+
+	return 0;
+}
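+
+/*
+ * Summary of the transition sequence above (added commentary): the group is
+ * first parked in ALUA_ACCESS_STATE_TRANSITION, the optional trans_delay is
+ * honoured, a 0x2A/ASYMMETRIC ACCESS STATE CHANGED unit attention is queued
+ * for every mapped LUN on every member port (skipping the initiating I_T
+ * nexus for an explicit STPG), the optional metadata is written out, and only
+ * then is the new primary state made visible.
+ */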
+
+int core_alua_do_port_transition(
+	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+	struct se_device *l_dev,
+	struct se_port *l_port,
+	struct se_node_acl *l_nacl,
+	int new_state,
+	int explict)
+{
+	struct se_device *dev;
+	struct se_port *port;
+	struct se_subsystem_dev *su_dev;
+	struct se_node_acl *nacl;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	int primary;
+
+	if (core_alua_check_transition(new_state, &primary) != 0)
+		return -EINVAL;
+
+	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
+
+	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = local_lu_gp_mem->lu_gp;
+	atomic_inc(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+	/*
+	 * For storage objects that are members of the 'default_lu_gp',
+	 * we only do transition on the passed *l_tg_pt_gp, and not
+	 * on all of the matching target port groups IDs in default_lu_gp.
+	 */
+	if (!lu_gp->lu_gp_id) {
+		/*
+		 * core_alua_do_transition_tg_pt() will always return
+		 * success.
+		 */
+		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+					md_buf, new_state, explict);
+		atomic_dec(&lu_gp->lu_gp_ref_cnt);
+		smp_mb__after_atomic_dec();
+		kfree(md_buf);
+		return 0;
+	}
+	/*
+	 * For all other LU groups aside from 'default_lu_gp', walk all of
+	 * the associated storage objects looking for a matching target port
+	 * group ID from the local target port group.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+				lu_gp_mem_list) {
+
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&lu_gp->lu_gp_lock);
+
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		list_for_each_entry(tg_pt_gp,
+				&su_dev->t10_alua.tg_pt_gps_list,
+				tg_pt_gp_list) {
+
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
+				continue;
+			/*
+			 * If the target port asymmetric access state is
+			 * changed for any target port group accessible via
+			 * a logical unit within a LU group, the target port
+			 * group asymmetric access states for the same
+			 * target port group accessible via other logical units
+			 * in that LU group will also change.
+			 */
+			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+				continue;
+
+			if (l_tg_pt_gp == tg_pt_gp) {
+				port = l_port;
+				nacl = l_nacl;
+			} else {
+				port = NULL;
+				nacl = NULL;
+			}
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			/*
+			 * core_alua_do_transition_tg_pt() will always return
+			 * success.
+			 */
+			core_alua_do_transition_tg_pt(tg_pt_gp, port,
+					nacl, md_buf, new_state, explict);
+
+			spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_dec();
+		}
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+		" Group IDs: %hu %s transition to primary state: %s\n",
+		config_item_name(&lu_gp->lu_gp_group.cg_item),
+		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+		core_alua_dump_state(new_state));
+
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	kfree(md_buf);
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	struct se_portal_group *se_tpg = port->sep_tpg;
+	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
+
+	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
+		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+
+	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+			"alua_tg_pt_status=0x%02x\n",
+			atomic_read(&port->sep_tg_pt_secondary_offline),
+			port->sep_tg_pt_secondary_stat);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
+			port->sep_lun->unpacked_lun);
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
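+
+/*
+ * Example of the secondary (per-port) metadata written above (values
+ * illustrative), stored at /var/target/alua/<fabric>/<wwn>[+<tpgt>]/lun_<N>:
+ *
+ *	alua_tg_pt_offline=1
+ *	alua_tg_pt_status=0x02
+ */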
+
+static int core_alua_set_tg_pt_secondary_state(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	int explict,
+	int offline)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	u32 md_buf_len;
+	int trans_delay_msecs;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (!tg_pt_gp) {
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		pr_err("Unable to complete secondary state"
+				" transition\n");
+		return -EINVAL;
+	}
+	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+	/*
+	 * Set the secondary ALUA target port access state to OFFLINE
+	 * or release the previously secondary state for struct se_port
+	 */
+	if (offline)
+		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+	else
+		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+	port->sep_tg_pt_secondary_stat = (explict) ?
+			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" to secondary access state: %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Do the optional transition delay after we set the secondary
+	 * ALUA access state.
+	 */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (port->sep_tg_pt_secondary_write_md) {
+		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+		if (!md_buf) {
+			pr_err("Unable to allocate md_buf for"
+				" secondary ALUA access metadata\n");
+			return -ENOMEM;
+		}
+		mutex_lock(&port->sep_tg_pt_md_mutex);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+				md_buf, md_buf_len);
+		mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+		kfree(md_buf);
+	}
+
+	return 0;
+}
+
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!lu_gp) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once..
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		pr_warn("ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gps_lock);
+	if (alua_lu_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_lu_gps_count:"
+				" 0x0000ffff reached\n");
+		spin_unlock(&lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -ENOSPC;
+	}
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!lu_gp_id)
+				goto again;
+
+			pr_warn("ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&lu_gps_lock);
+			return -EINVAL;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+	alua_lu_gps_count++;
+	spin_unlock(&lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!lu_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
+	 */
+	spin_lock(&lu_gps_lock);
+	list_del(&lu_gp->lu_gp_node);
+	alua_lu_gps_count--;
+	spin_unlock(&lu_gps_lock);
+	/*
+	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+	 * released with core_alua_put_lu_gp_from_name()
+	 */
+	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_lu_gp * from all associated
+	 * struct se_device.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		/*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+		 */
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		if (lu_gp != default_lu_gp)
+			__core_alua_attach_lu_gp_mem(lu_gp_mem,
+					default_lu_gp);
+		else
+			lu_gp_mem->lu_gp = NULL;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem)
+		return;
+
+	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		spin_lock(&lu_gp->lu_gp_lock);
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		lu_gp_mem->lu_gp = NULL;
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_item *ci;
+
+	spin_lock(&lu_gps_lock);
+	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+		if (!lu_gp->lu_gp_valid_id)
+			continue;
+		ci = &lu_gp->lu_gp_group.cg_item;
+		if (!strcmp(config_item_name(ci), name)) {
+			atomic_inc(&lu_gp->lu_gp_ref_cnt);
+			spin_unlock(&lu_gps_lock);
+			return lu_gp;
+		}
+	}
+	spin_unlock(&lu_gps_lock);
+
+	return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gps_lock);
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_attach_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	lu_gp_mem->lu_gp = lu_gp;
+	lu_gp_mem->lu_gp_assoc = 1;
+	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+	lu_gp->lu_gp_members++;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
+ */
+void __core_alua_drop_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_del(&lu_gp_mem->lu_gp_mem_list);
+	lu_gp_mem->lu_gp = NULL;
+	lu_gp_mem->lu_gp_assoc = 0;
+	lu_gp->lu_gp_members--;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+	struct se_subsystem_dev *su_dev,
+	const char *name,
+	int def_group)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+	if (!tg_pt_gp) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+	/*
+	 * Enable both explicit and implicit ALUA support by default
+	 */
+	tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+	/*
+	 * Set the default Active/NonOptimized Delay in milliseconds
+	 */
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+	if (def_group) {
+		spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+		tg_pt_gp->tg_pt_gp_id =
+				su_dev->t10_alua.alua_tg_pt_gps_counter++;
+		tg_pt_gp->tg_pt_gp_valid_id = 1;
+		su_dev->t10_alua.alua_tg_pt_gps_count++;
+		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			      &su_dev->t10_alua.tg_pt_gps_list);
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	}
+
+	return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u16 tg_pt_gp_id)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+	u16 tg_pt_gp_id_tmp;
+	/*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
+	 */
+	if (tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_warn("ALUA TG PT Group already has a valid ID,"
+			" ignoring request\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+		return -ENOSPC;
+	}
+again:
+	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+			su_dev->t10_alua.alua_tg_pt_gps_counter++;
+
+	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+			if (!tg_pt_gp_id)
+				goto again;
+
+			pr_err("ALUA Target Port Group ID: %hu already"
+				" exists, ignoring request\n", tg_pt_gp_id);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			return -EINVAL;
+		}
+	}
+
+	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+	tg_pt_gp->tg_pt_gp_valid_id = 1;
+	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			&su_dev->t10_alua.tg_pt_gps_list);
+	su_dev->t10_alua.alua_tg_pt_gps_count++;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+
+	return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+	struct se_port *port)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+				GFP_KERNEL);
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+	tg_pt_gp_mem->tg_pt = port;
+	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+
+	return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has already
+	 * been called from target_core_alua_drop_tg_pt_gp().
+	 *
+	 * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
+	 */
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_del(&tg_pt_gp->tg_pt_gp_list);
+	su_dev->t10_alua.alua_tg_pt_gps_counter--;
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+	/*
+	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+	 * core_alua_get_tg_pt_gp_by_name() in
+	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+	 * to be released with core_alua_put_tg_pt_gp_from_name().
+	 */
+	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_tg_pt_gp from all associated
+	 * struct se_port.
+	 */
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		/*
+		 * tg_pt_gp_mem is associated with a single
+		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
+		 * core_alua_free_tg_pt_gp_mem().
+		 *
+		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+		 * default_tg_pt_gp.
+		 */
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					su_dev->t10_alua.default_tg_pt_gp);
+		} else
+			tg_pt_gp_mem->tg_pt_gp = NULL;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return;
+
+	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp) {
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		tg_pt_gp_mem->tg_pt_gp = NULL;
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+	struct se_subsystem_dev *su_dev,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *ci;
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		if (!strcmp(config_item_name(ci), name)) {
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+			return tg_pt_gp;
+		}
+	}
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+
+	return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+			&tg_pt_gp->tg_pt_gp_mem_list);
+	tg_pt_gp->tg_pt_gp_members++;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	tg_pt_gp_mem->tg_pt_gp = NULL;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+	tg_pt_gp->tg_pt_gp_members--;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct config_item *tg_pt_ci;
+	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return len;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp) {
+		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+			" %hu\nTG Port Primary Access State: %s\nTG Port "
+			"Primary Access Status: %s\nTG Port Secondary Access"
+			" State: %s\nTG Port Secondary Access Status: %s\n",
+			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+			core_alua_dump_state(atomic_read(
+					&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_status(
+				tg_pt_gp->tg_pt_gp_alua_access_status),
+			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+			"Offline" : "None",
+			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	return len;
+}
+
+ssize_t core_alua_store_tg_pt_gp_info(
+	struct se_port *port,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+	int move = 0;
+
+	tpg = port->sep_tpg;
+	lun = port->sep_lun;
+
+	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+		pr_warn("SPC3_ALUA_EMULATED not enabled for"
+			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		return -EINVAL;
+	}
+
+	if (count > TG_PT_GROUP_NAME_BUF) {
+		pr_err("ALUA Target Port Group alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA target port group alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
+		 * struct t10_alua_tg_pt_gp.  This reference is released with
+		 * core_alua_put_tg_pt_gp_from_name() below.
+		 */
+		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+					strstrip(buf));
+		if (!tg_pt_gp_new)
+			return -ENODEV;
+	}
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem) {
+		if (tg_pt_gp_new)
+			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp) {
+		/*
+		 * Clearing an existing tg_pt_gp association, and replacing
+		 * with the default_tg_pt_gp.
+		 */
+		if (!tg_pt_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Moving"
+				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
+				" alua/%s, ID: %hu back to"
+				" default_tg_pt_gp\n",
+				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+				tpg->se_tpg_tfo->tpg_get_tag(tpg),
+				config_item_name(&lun->lun_group.cg_item),
+				config_item_name(
+					&tg_pt_gp->tg_pt_gp_group.cg_item),
+				tg_pt_gp->tg_pt_gp_id);
+
+			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					su_dev->t10_alua.default_tg_pt_gp);
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+		 */
+		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+		move = 1;
+	}
+	/*
+	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
+	 */
+	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
+		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		config_item_name(&lun->lun_group.cg_item),
+		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+		tg_pt_gp_new->tg_pt_gp_id);
+
+	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+	return count;
+}
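+
+/*
+ * Usage sketch (editorial, paths illustrative): this store handler is wired
+ * up in target_core_fabric_configfs.c to the per-LUN ALUA group attribute,
+ * so, assuming the usual attribute name alua_tg_pt_gp, associating a port
+ * with a named target port group from userspace looks like:
+ *
+ *	echo some_group > \
+ *	    /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1/lun/lun_0/alua_tg_pt_gp
+ *
+ * while echoing "NULL" moves the port back to default_tg_pt_gp.
+ */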
+
+ssize_t core_alua_show_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+		return sprintf(page, "Implict and Explict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+		return sprintf(page, "Implict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+		return sprintf(page, "Explict\n");
+	else
+		return sprintf(page, "None\n");
+}
+
+ssize_t core_alua_store_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_access_type\n");
+		return -EINVAL;
+	}
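+	/* 0 = None, 1 = Implicit, 2 = Explicit, 3 = Implicit and Explicit */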
+	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+		pr_err("Illegal value for alua_access_type:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 3)
+		tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+	else if (tmp == 2)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+	else if (tmp == 1)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+	else
+		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+	return count;
+}
+
+ssize_t core_alua_show_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract nonop_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_NONOP_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract trans_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+		pr_err("Passed trans_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_TRANS_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract preferred ALUA value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+	if (!lun->lun_sep)
+		return -ENODEV;
+
+	return sprintf(page, "%d\n",
+		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned long tmp;
+	int ret;
+
+	if (!lun->lun_sep)
+		return -ENODEV;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_offline value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to locate *tg_pt_gp_mem\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+			lun->lun_sep, 0, (int)tmp);
+	if (ret < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_status(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_status\n");
+		return -EINVAL;
+	}
+	if ((tmp != ALUA_STATUS_NONE) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+			lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_tg_pt_write_md\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_tg_pt_write_md:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+	return count;
+}
+
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = &su_dev->t10_alua;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate SCSI logic themselves.
+	 */
+	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+			" emulation\n", dev->transport->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated ALUA.
+	 */
+	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
+			" device\n", dev->transport->name);
+		/*
+		 * Associate this struct se_device with the default ALUA
+		 * LUN Group.
+		 */
+		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+		if (IS_ERR(lu_gp_mem))
+			return PTR_ERR(lu_gp_mem);
+
+		alua->alua_type = SPC3_ALUA_EMULATED;
+		alua->alua_state_check = &core_alua_state_check;
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		__core_alua_attach_lu_gp_mem(lu_gp_mem,
+				default_lu_gp);
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		pr_debug("%s: Adding to default ALUA LU Group:"
+			" core/alua/lu_gps/default_lu_gp\n",
+			dev->transport->name);
+	} else {
+		alua->alua_type = SPC2_ALUA_DISABLED;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
+			" device\n", dev->transport->name);
+	}
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.h
new file mode 100644
index 0000000..c5b4ecd
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA				0x00
+#define TPGS_IMPLICT_ALUA			0x10
+#define TPGS_EXPLICT_ALUA			0x20
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
+#define ALUA_ACCESS_STATE_STANDBY		0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_OFFLINE		0xe
+#define ALUA_ACCESS_STATE_TRANSITION		0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE				0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG		0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA		0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION			0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY			0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE			0x0c
+#define ASCQ_04H_ALUA_OFFLINE				0x12
+
+/*
+ * Used as the default for Active/NonOptimized delay (in milliseconds).
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS			100
+#define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */
+/*
+ * Used for implicit and explicit ALUA transitional delay, which is disabled
+ * by default, and is intended to be used for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
+#define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN				512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN			256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int target_emulate_report_target_port_groups(struct se_task *);
+extern int target_emulate_set_target_port_groups(struct se_task *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+				struct se_device *, struct se_port *,
+				struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+			struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+					struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+					struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+						size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+						char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+					size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+					const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+					char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+					const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_cdb.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_cdb.c
new file mode 100644
index 0000000..2d88ce8
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_cdb.c
@@ -0,0 +1,1216 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	/*
+	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+	 */
+	buf[5]	= 0x80;
+
+	/*
+	 * Set TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+	 *
+	 * See spc4r17 section 6.4.2 Table 135
+	 */
+	if (!port)
+		return;
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp)
+		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+{
+	struct se_lun *lun = cmd->se_lun;
+	struct se_device *dev = cmd->se_dev;
+
+	/* Set RMB (removable media) for tape devices */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
+		buf[1] = 0x80;
+
+	buf[2] = dev->transport->get_device_rev(dev);
+
+	/*
+	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+	 *
+	 * SPC4 says:
+	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
+	 *   standard INQUIRY data is in the format defined in this
+	 *   standard. Response data format values less than 2h are
+	 *   obsolete. Response data format values greater than 2h are
+	 *   reserved.
+	 */
+	buf[3] = 2;
+
+	/*
+	 * Enable SCCS and TPGS fields for Emulated ALUA
+	 */
+	if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
+		target_fill_alua_data(lun->lun_sep, buf);
+
+	buf[7] = 0x2; /* CmdQue=1 */
+
+	memcpy(&buf[8], "LIO-ORG ", 8);
+	memset(&buf[16], 0x20, 16);
+	memcpy(&buf[16], dev->se_sub_dev->t10_wwn.model,
+	       min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.model), 16));
+	memcpy(&buf[32], dev->se_sub_dev->t10_wwn.revision,
+	       min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.revision), 4));
+	buf[4] = 31; /* Set additional length to 31 */
+
+	return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	u16 len = 0;
+
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		u32 unit_serial_len;
+
+		unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+		unit_serial_len++; /* For NULL Terminator */
+
+		len += sprintf(&buf[4], "%s",
+			dev->se_sub_dev->t10_wwn.unit_serial);
+		len++; /* Extra Byte for NULL Terminator */
+		buf[3] = len;
+	}
+	return 0;
+}
+
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
+{
+	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+	int cnt;
+	bool next = true;
+
+	/*
+	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT SERIAL
+	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+	 * per-device uniqueness.
+	 */
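+	/*
+	 * The first hex digit is OR'd into the low nibble of buf[0], whose high
+	 * nibble is expected to be pre-filled by the caller; each following
+	 * byte is then filled high nibble first, then low nibble.
+	 */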
+	for (cnt = 0; *p && cnt < 13; p++) {
+		int val = hex_to_bin(*p);
+
+		if (val < 0)
+			continue;
+
+		if (next) {
+			next = false;
+			buf[cnt++] |= val;
+		} else {
+			next = true;
+			buf[cnt] = val << 4;
+		}
+	}
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_lun *lun = cmd->se_lun;
+	struct se_port *port = NULL;
+	struct se_portal_group *tpg = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+	u32 prod_len;
+	u32 unit_serial_len, off = 0;
+	u16 len = 0, id_len;
+
+	off = 4;
+
+	/*
+	 * NAA IEEE Registered Extended Assigned designator format, see
+	 * spc4r17 section 7.7.3.6.5
+	 *
+	 * We depend upon a target_core_mod/ConfigFS provided
+	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+	 * value in order to return the NAA id.
+	 */
+	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+		goto check_t10_vend_desc;
+
+	/* CODE SET == Binary */
+	buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+	buf[off] = 0x00;
+
+	/* Identifier/Designator type == NAA identifier */
+	buf[off++] |= 0x3;
+	off++;
+
+	/* Identifier/Designator length */
+	buf[off++] = 0x10;
+
+	/*
+	 * Start NAA IEEE Registered Extended Identifier/Designator
+	 */
+	buf[off++] = (0x6 << 4);
+
+	/*
+	 * Use OpenFabrics IEEE Company ID: 00 14 05
+	 */
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	/*
+	 * Return ConfigFS Unit Serial Number information for
+	 * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
+	 */
+	target_parse_naa_6h_vendor_specific(dev, &buf[off]);
+
+	len = 20;
+	off = (len + 4);
+
+check_t10_vend_desc:
+	/*
+	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+	 */
+	id_len = 8; /* For Vendor field */
+	prod_len = 4; /* For VPD Header */
+	prod_len += 8; /* For Vendor field */
+	prod_len += strlen(prod);
+	prod_len++; /* For : */
+
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len =
+			strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		id_len += sprintf(&buf[off+12], "%s:%s", prod,
+				&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+	}
+	buf[off] = 0x2; /* ASCII */
+	buf[off+1] = 0x1; /* T10 Vendor ID */
+	buf[off+2] = 0x0;
+	memcpy(&buf[off+4], "LIO-ORG", 8);
+	/* Extra Byte for NULL Terminator */
+	id_len++;
+	/* Identifier Length */
+	buf[off+3] = id_len;
+	/* Header size for Designation descriptor */
+	len += (id_len + 4);
+	off += (id_len + 4);
+	/*
+	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+	 */
+	port = lun->lun_sep;
+	if (port) {
+		struct t10_alua_lu_gp *lu_gp;
+		u32 padding, scsi_name_len;
+		u16 lu_gp_id = 0;
+		u16 tg_pt_gp_id = 0;
+		u16 tpgt;
+
+		tpg = port->sep_tpg;
+		/*
+		 * Relative target port identifier, see spc4r17
+		 * section 7.7.3.7
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		buf[off] =
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+		buf[off++] |= 0x4;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		/* Skip over Obsolete field in RTPI payload
+		 * in Table 472 */
+		off += 2;
+		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+		buf[off++] = (port->sep_rtpi & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Target port group identifier, see spc4r17
+		 * section 7.7.3.8
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		if (dev->se_sub_dev->t10_alua.alua_type !=
+				SPC3_ALUA_EMULATED)
+			goto check_scsi_name;
+
+		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+		if (!tg_pt_gp_mem)
+			goto check_lu_gp;
+
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+		if (!tg_pt_gp) {
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+			goto check_lu_gp;
+		}
+		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		buf[off] =
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Target port group identifier */
+		buf[off++] |= 0x5;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Logical Unit Group identifier, see spc4r17
+		 * section 7.7.3.8
+		 */
+check_lu_gp:
+		lu_gp_mem = dev->dev_alua_lu_gp_mem;
+		if (!lu_gp_mem)
+			goto check_scsi_name;
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		lu_gp = lu_gp_mem->lu_gp;
+		if (!lu_gp) {
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+			goto check_scsi_name;
+		}
+		lu_gp_id = lu_gp->lu_gp_id;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		/* DESIGNATOR TYPE == Logical Unit Group identifier */
+		buf[off++] |= 0x6;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((lu_gp_id >> 8) & 0xff);
+		buf[off++] = (lu_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * SCSI name string designator, see spc4r17
+		 * section 7.7.3.11
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_scsi_name:
+		scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+		scsi_name_len += 10;
+		/* Check for 4-byte padding */
+		padding = ((-scsi_name_len) & 3);
+		if (padding != 0)
+			scsi_name_len += padding;
+		/* Header size + Designation descriptor */
+		scsi_name_len += 4;
+
+		buf[off] =
+			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+		 */
+		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
+		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
+		scsi_name_len += 1 /* Include  NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		if (padding)
+			scsi_name_len += padding;
+
+		buf[off-1] = scsi_name_len;
+		off += scsi_name_len;
+		/* Header size + Designation descriptor */
+		len += (scsi_name_len + 4);
+	}
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+	return 0;
+}
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+	buf[3] = 0x3c;
+	/* Set HEADSUP, ORDSUP, SIMPSUP */
+	buf[5] = 0x07;
+
+	/* If WriteCache emulation is enabled, set V_SUP */
+	if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+		buf[6] = 0x01;
+	return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+	int have_tp = 0;
+
+	/*
+	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+	 * different page length for Thin Provisioning.
+	 */
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+		have_tp = 1;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = have_tp ? 0x3c : 0x10;
+
+	/* Set WSNZ to 1 */
+	buf[4] = 0x01;
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+	 */
+	put_unaligned_be16(1, &buf[6]);
+
+	/*
+	 * Set MAXIMUM TRANSFER LENGTH
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+
+	/*
+	 * Exit now if we don't support TP.
+	 */
+	if (!have_tp)
+		return 0;
+
+	/*
+	 * Set MAXIMUM UNMAP LBA COUNT
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+
+	/*
+	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+			   &buf[24]);
+
+	/*
+	 * Set OPTIMAL UNMAP GRANULARITY
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+
+	/*
+	 * UNMAP GRANULARITY ALIGNMENT
+	 */
+	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+			   &buf[32]);
+	if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+		buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+	return 0;
+}
+
+/* Block Device Characteristics VPD page */
+static int
+target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[3] = 0x3c;
+	buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+
+	return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+	 *
+	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+	 * zero, then the page length shall be set to 0004h.  If the DP bit
+	 * is set to one, then the page length shall be set to the value
+	 * defined in table 162.
+	 */
+	buf[0] = dev->transport->get_device_type(dev);
+
+	/*
+	 * Set Hardcoded length mentioned above for DP=0
+	 */
+	put_unaligned_be16(0x0004, &buf[2]);
+
+	/*
+	 * The THRESHOLD EXPONENT field indicates the threshold set size in
+	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2(threshold exponent)).
+	 *
+	 * Note that this is currently set to 0x00 as mkp says it will be
+	 * changing again.  We can enable this once it has settled in T10
+	 * and is actually used by Linux/SCSI ML code.
+	 */
+	buf[4] = 0x00;
+
+	/*
+	 * A TPU bit set to one indicates that the device server supports
+	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+	 * that the device server does not support the UNMAP command.
+	 */
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+		buf[5] = 0x80;
+
+	/*
+	 * A TPWS bit set to one indicates that the device server supports
+	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+	 * A TPWS bit set to zero indicates that the device server does not
+	 * support the use of the WRITE SAME (16) command to unmap LBAs.
+	 */
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+		buf[5] |= 0x40;
+
+	return 0;
+}
+
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
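+/* Table mapping INQUIRY EVPD page codes to their emulation handlers. */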
+static struct {
+	uint8_t		page;
+	int		(*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+	{ .page = 0x00, .emulate = target_emulate_evpd_00 },
+	{ .page = 0x80, .emulate = target_emulate_evpd_80 },
+	{ .page = 0x83, .emulate = target_emulate_evpd_83 },
+	{ .page = 0x86, .emulate = target_emulate_evpd_86 },
+	{ .page = 0xb0, .emulate = target_emulate_evpd_b0 },
+	{ .page = 0xb1, .emulate = target_emulate_evpd_b1 },
+	{ .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+};
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	int p;
+
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (cmd->se_dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = ARRAY_SIZE(evpd_handlers);
+		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+			buf[p + 4] = evpd_handlers[p].page;
+	}
+
+	return 0;
+}
+
+int target_emulate_inquiry(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+	unsigned char *buf, *map_buf;
+	unsigned char *cdb = cmd->t_task_cdb;
+	int p, ret;
+
+	map_buf = transport_kmap_data_sg(cmd);
+	/*
+	 * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+	 * know we actually allocated a full page.  Otherwise, if the
+	 * data buffer is too small, allocate a temporary buffer so we
+	 * don't have to worry about overruns in all our INQUIRY
+	 * emulation handling.
+	 */
+	if (cmd->data_length < SE_INQUIRY_BUF &&
+	    (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+		if (!buf) {
+			transport_kunmap_data_sg(cmd);
+			cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -ENOMEM;
+		}
+	} else {
+		buf = map_buf;
+	}
+
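+	/*
+	 * For the virtual LUN 0 device, report PERIPHERAL QUALIFIER 001b and
+	 * PERIPHERAL DEVICE TYPE 1Fh (unknown or no device type).
+	 */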
+	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+		buf[0] = 0x3f; /* Not connected */
+	else
+		buf[0] = dev->transport->get_device_type(dev);
+
+	if (!(cdb[1] & 0x1)) {
+		if (cdb[2]) {
+			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
+			       cdb[2]);
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ret = target_emulate_inquiry_std(cmd, buf);
+		goto out;
+	}
+
+	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
+		if (cdb[2] == evpd_handlers[p].page) {
+			buf[1] = cdb[2];
+			ret = evpd_handlers[p].emulate(cmd, buf);
+			goto out;
+		}
+	}
+
+	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+	ret = -EINVAL;
+
+out:
+	if (buf != map_buf) {
+		memcpy(map_buf, buf, cmd->data_length);
+		kfree(buf);
+	}
+	transport_kunmap_data_sg(cmd);
+
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+int target_emulate_readcapacity(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
+	unsigned long long blocks_long = dev->transport->get_blocks(dev);
+	u32 blocks;
+
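+	/*
+	 * READ CAPACITY (10) can only return a 32-bit RETURNED LOGICAL BLOCK
+	 * ADDRESS; report 0xffffffff so the initiator falls back to
+	 * READ CAPACITY (16) for larger devices.
+	 */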
+	if (blocks_long >= 0x00000000ffffffff)
+		blocks = 0xffffffff;
+	else
+		blocks = (u32)blocks_long;
+
+	buf = transport_kmap_data_sg(cmd);
+
+	buf[0] = (blocks >> 24) & 0xff;
+	buf[1] = (blocks >> 16) & 0xff;
+	buf[2] = (blocks >> 8) & 0xff;
+	buf[3] = blocks & 0xff;
+	buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+	buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+	buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+	buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+
+	transport_kunmap_data_sg(cmd);
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int target_emulate_readcapacity_16(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf;
+	unsigned long long blocks = dev->transport->get_blocks(dev);
+
+	buf = transport_kmap_data_sg(cmd);
+
+	buf[0] = (blocks >> 56) & 0xff;
+	buf[1] = (blocks >> 48) & 0xff;
+	buf[2] = (blocks >> 40) & 0xff;
+	buf[3] = (blocks >> 32) & 0xff;
+	buf[4] = (blocks >> 24) & 0xff;
+	buf[5] = (blocks >> 16) & 0xff;
+	buf[6] = (blocks >> 8) & 0xff;
+	buf[7] = blocks & 0xff;
+	buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+	buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+	buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+	buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+	/*
+	 * Set Thin Provisioning Enable bit following sbc3r22 in section
+	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+	 */
+	if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+		buf[14] = 0x80;
+
+	transport_kunmap_data_sg(cmd);
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+	p[0] = 0x01;
+	p[1] = 0x0a;
+
+	return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x0a;
+	p[1] = 0x0a;
+	p[2] = 2;
+	/*
+	 * From spc4r23, 7.4.7 Control mode page
+	 *
+	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+	 * restrictions on the algorithm used for reordering commands
+	 * having the SIMPLE task attribute (see SAM-4).
+	 *
+	 *                    Table 368 -- QUEUE ALGORITHM MODIFIER field
+	 *                         Code      Description
+	 *                          0h       Restricted reordering
+	 *                          1h       Unrestricted reordering allowed
+	 *                          2h to 7h    Reserved
+	 *                          8h to Fh    Vendor specific
+	 *
+	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+	 * the device server shall order the processing sequence of commands
+	 * having the SIMPLE task attribute such that data integrity is maintained
+	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+	 * requests is halted at any time, the final value of all data observable
+	 * on the medium shall be the same as if all the commands had been processed
+	 * with the ORDERED task attribute).
+	 *
+	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+	 * device server may reorder the processing sequence of commands having the
+	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
+	 * command sequence order shall be explicitly handled by the application client
+	 * through the selection of appropriate commands and task attributes.
+	 */
+	p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+	 *
+	 * 00b: The logical unit shall clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when a
+	 * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+	 * status.
+	 *
+	 * 10b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when
+	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+	 * CONFLICT status.
+	 *
+	 * 11b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall establish a unit attention condition for the
+	 * initiator port associated with the I_T nexus on which the BUSY,
+	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+	 * Depending on the status, the additional sense code shall be set to
+	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+	 * command, a unit attention condition shall be established only once
+	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+	 * to the number of commands completed with one of those status codes.
+	 */
+	p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Task Aborted Status (TAS) bit set to zero.
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+	p[8] = 0xff;
+	p[9] = 0xff;
+	p[11] = 30;
+
+	return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x08;
+	p[1] = 0x12;
+	if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+		p[2] = 0x04; /* Write Cache Enable */
+	p[12] = 0x20; /* Disabled Read Ahead */
+
+	return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+	/*
+	 * I believe that the WP bit (bit 7) in the mode header is the same for
+	 * all device types.
+	 */
+	switch (type) {
+	case TYPE_DISK:
+	case TYPE_TAPE:
+	default:
+		buf[0] |= 0x80; /* WP bit */
+		break;
+	}
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+	switch (type) {
+	case TYPE_DISK:
+		buf[0] |= 0x10; /* DPOFUA bit */
+		break;
+	default:
+		break;
+	}
+}
+
+int target_emulate_modesense(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	char *cdb = cmd->t_task_cdb;
+	unsigned char *rbuf;
+	int type = dev->transport->get_device_type(dev);
+	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+	int offset = ten ? 8 : 4;
+	int length = 0;
+	unsigned char buf[SE_MODE_PAGE_BUF];
+
+	memset(buf, 0, SE_MODE_PAGE_BUF);
+
+	switch (cdb[2] & 0x3f) {
+	case 0x01:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		break;
+	case 0x08:
+		length = target_modesense_caching(dev, &buf[offset]);
+		break;
+	case 0x0a:
+		length = target_modesense_control(dev, &buf[offset]);
+		break;
+	case 0x3f:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		length += target_modesense_caching(dev, &buf[offset+length]);
+		length += target_modesense_control(dev, &buf[offset+length]);
+		break;
+	default:
+		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+		       cdb[2] & 0x3f, cdb[3]);
+		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+		return -EINVAL;
+	}
+	offset += length;
+
+	if (ten) {
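+		/* MODE DATA LENGTH excludes the 2-byte length field itself. */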
+		offset -= 2;
+		buf[0] = (offset >> 8) & 0xff;
+		buf[1] = offset & 0xff;
+
+		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[3], type);
+
+		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[3], type);
+
+		if ((offset + 2) > cmd->data_length)
+			offset = cmd->data_length;
+
+	} else {
+		offset -= 1;
+		buf[0] = offset & 0xff;
+
+		if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[2], type);
+
+		if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[2], type);
+
+		if ((offset + 1) > cmd->data_length)
+			offset = cmd->data_length;
+	}
+
+	rbuf = transport_kmap_data_sg(cmd);
+	memcpy(rbuf, buf, offset);
+	transport_kunmap_data_sg(cmd);
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int target_emulate_request_sense(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	unsigned char *cdb = cmd->t_task_cdb;
+	unsigned char *buf;
+	u8 ua_asc = 0, ua_ascq = 0;
+	int err = 0;
+
+	if (cdb[1] & 0x01) {
+		pr_err("REQUEST_SENSE description emulation not"
+			" supported\n");
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -ENOSYS;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+
+	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+		/*
+		 * CURRENT ERROR, UNIT ATTENTION
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+
+		if (cmd->data_length < 18) {
+			buf[7] = 0x00;
+			err = -EINVAL;
+			goto end;
+		}
+		/*
+		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+		buf[7] = 0x0A;
+	} else {
+		/*
+		 * CURRENT ERROR, NO SENSE
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+
+		if (cmd->data_length < 18) {
+			buf[7] = 0x00;
+			err = -EINVAL;
+			goto end;
+		}
+		/*
+		 * NO ADDITIONAL SENSE INFORMATION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = 0x00;
+		buf[7] = 0x0A;
+	}
+
+end:
+	transport_kunmap_data_sg(cmd);
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+int target_emulate_unmap(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf, *ptr = NULL;
+	sector_t lba;
+	int size = cmd->data_length;
+	u32 range;
+	int ret = 0;
+	int dl, bd_dl;
+
+	if (!dev->transport->do_discard) {
+		pr_err("UNMAP emulation not supported for: %s\n",
+				dev->transport->name);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+
+	dl = get_unaligned_be16(&buf[0]);
+	bd_dl = get_unaligned_be16(&buf[2]);
+
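+	/*
+	 * Walk only the UNMAP block descriptors: skip the 8-byte parameter
+	 * list header and bound the walk by BLOCK DESCRIPTOR DATA LENGTH.
+	 */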
+	size = min(size - 8, bd_dl);
+	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	ptr = &buf[8];
+	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+	while (size >= 16) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto err;
+		}
+
+		if (lba + range > dev->transport->get_blocks(dev) + 1) {
+			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
+			ret = -EINVAL;
+			goto err;
+		}
+
+		ret = dev->transport->do_discard(dev, lba, range);
+		if (ret < 0) {
+			pr_err("blkdev_issue_discard() failed: %d\n",
+					ret);
+			goto err;
+		}
+
+		ptr += 16;
+		size -= 16;
+	}
+
+err:
+	transport_kunmap_data_sg(cmd);
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+int target_emulate_write_same(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	sector_t range;
+	sector_t lba = cmd->t_task_lba;
+	u32 num_blocks;
+	int ret;
+
+	if (!dev->transport->do_discard) {
+		pr_err("WRITE_SAME emulation not supported"
+				" for: %s\n", dev->transport->name);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
+	}
+
+	if (cmd->t_task_cdb[0] == WRITE_SAME)
+		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
+	/*
+	 * Use the explicit range when non-zero is supplied, otherwise calculate
+	 * the remaining range based on ->get_blocks() - starting LBA.
+	 */
+	if (num_blocks != 0)
+		range = num_blocks;
+	else
+		range = (dev->transport->get_blocks(dev) - lba) + 1;
+
+	pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+		 (unsigned long long)lba, (unsigned long long)range);
+
+	ret = dev->transport->do_discard(dev, lba, range);
+	if (ret < 0) {
+		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+		return ret;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int target_emulate_synchronize_cache(struct se_task *task)
+{
+	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct se_cmd *cmd = task->task_se_cmd;
+
+	if (!dev->transport->do_sync_cache) {
+		pr_err("SYNCHRONIZE_CACHE emulation not supported"
+			" for: %s\n", dev->transport->name);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		return -ENOSYS;
+	}
+
+	dev->transport->do_sync_cache(task);
+	return 0;
+}
+
+int target_emulate_noop(struct se_task *task)
+{
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+/*
+ * Write a CDB into @cdb that is based on the one the initiator sent us,
+ * but updated to only cover the sectors that the current task handles.
+ */
+void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
+
+	memcpy(cdb, cmd->t_task_cdb, cdb_len);
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		unsigned long long lba = task->task_lba;
+		u32 sectors = task->task_sectors;
+
+		switch (cdb_len) {
+		case 6:
+			/* 21-bit LBA and 8-bit sectors */
+			cdb[1] = (lba >> 16) & 0x1f;
+			cdb[2] = (lba >> 8) & 0xff;
+			cdb[3] = lba & 0xff;
+			cdb[4] = sectors & 0xff;
+			break;
+		case 10:
+			/* 32-bit LBA and 16-bit sectors */
+			put_unaligned_be32(lba, &cdb[2]);
+			put_unaligned_be16(sectors, &cdb[7]);
+			break;
+		case 12:
+			/* 32-bit LBA and 32-bit sectors */
+			put_unaligned_be32(lba, &cdb[2]);
+			put_unaligned_be32(sectors, &cdb[6]);
+			break;
+		case 16:
+			/* 64-bit LBA and 32-bit sectors */
+			put_unaligned_be64(lba, &cdb[2]);
+			put_unaligned_be32(sectors, &cdb[10]);
+			break;
+		case 32:
+			/* 64-bit LBA and 32-bit sectors, extended CDB */
+			put_unaligned_be64(lba, &cdb[12]);
+			put_unaligned_be32(sectors, &cdb[28]);
+			break;
+		default:
+			BUG();
+		}
+	}
+}
+EXPORT_SYMBOL(target_get_task_cdb);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_configfs.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_configfs.c
new file mode 100644
index 0000000..dbcede3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_configfs.c
@@ -0,0 +1,3284 @@
+/*******************************************************************************
+ * Filename:  target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2011 Rising Tide Systems
+ * Copyright (c) 2008-2011 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+#include <linux/spinlock.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+static LIST_HEAD(g_tf_list);
+static DEFINE_MUTEX(g_tf_lock);
+
+struct target_core_configfs_attribute {
+	struct configfs_attribute attr;
+	ssize_t (*show)(void *, char *);
+	ssize_t (*store)(void *, const char *, size_t);
+};
+
+static struct config_group target_core_hbagroup;
+static struct config_group alua_group;
+static struct config_group alua_lu_gps_group;
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      char *page)
+{
+	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+		utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+	.show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+	.ca_owner	= THIS_MODULE,
+	.ca_name	= "version",
+	.ca_mode	= S_IRUGO,
+};
+
+static struct target_fabric_configfs *target_core_get_fabric(
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!name)
+		return NULL;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(tf, &g_tf_list, tf_list) {
+		if (!strcmp(tf->tf_name, name)) {
+			atomic_inc(&tf->tf_access_cnt);
+			mutex_unlock(&g_tf_lock);
+			return tf;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+
+	return NULL;
+}
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
+			" %s\n", group, name);
+	/*
+	 * Below are some hardcoded request_module() calls to automatically
+	 * load fabric modules when the following is called:
+	 *
+	 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+	 *
+	 * Note that this does not limit which TCM fabric module can be
+	 * registered, but simply provides auto-loading logic for modules with
+	 * mkdir(2) system calls with known TCM fabric modules.
+	 */
+	if (!strncmp(name, "iscsi", 5)) {
+		/*
+		 * Automatically load the LIO Target fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/iscsi
+		 */
+		ret = request_module("iscsi_target_mod");
+		if (ret < 0) {
+			pr_err("request_module() failed for"
+				" iscsi_target_mod.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else if (!strncmp(name, "loopback", 8)) {
+		/*
+		 * Automatically load the tcm_loop fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/loopback
+		 */
+		ret = request_module("tcm_loop");
+		if (ret < 0) {
+			pr_err("request_module() failed for"
+				" tcm_loop.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	tf = target_core_get_fabric(name);
+	if (!tf) {
+		pr_err("target_core_get_fabric() failed for %s\n",
+			name);
+		return ERR_PTR(-EINVAL);
+	}
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
+			" %s\n", tf->tf_name);
+	/*
+	 * On a successful target_core_get_fabric() lookup, the returned
+	 * struct target_fabric_configfs *tf will contain a usage reference.
+	 */
+	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+	tf->tf_group.default_groups = tf->tf_default_groups;
+	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+	tf->tf_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&tf->tf_group, name,
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+			&TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+			" %s\n", tf->tf_group.cg_item.ci_name);
+	/*
+	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+	 */
+	tf->tf_ops.tf_subsys = tf->tf_subsys;
+	tf->tf_fabric = &tf->tf_group.cg_item;
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+			" for %s\n", name);
+
+	return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(
+		to_config_group(item), struct target_fabric_configfs, tf_group);
+	struct config_group *tf_group;
+	struct config_item *df_item;
+	int i;
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+		" tf list\n", config_item_name(item));
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+			" %s\n", tf->tf_name);
+	atomic_dec(&tf->tf_access_cnt);
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
+			" tf->tf_fabric for %s\n", tf->tf_name);
+	tf->tf_fabric = NULL;
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+			" %s\n", config_item_name(item));
+
+	tf_group = &tf->tf_group;
+	for (i = 0; tf_group->default_groups[i]; i++) {
+		df_item = &tf_group->default_groups[i]->cg_item;
+		tf_group->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+	.make_group	= &target_core_register_fabric,
+	.drop_item	= &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/config/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+	&target_core_item_attr_version,
+	NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+	.ct_item_ops	= &target_core_fabric_item_ops,
+	.ct_group_ops	= &target_core_fabric_group_ops,
+	.ct_attrs	= target_core_fabric_item_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "target",
+			.ci_type = &target_core_fabrics_item,
+		},
+	},
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+	&target_core_fabrics,
+	NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ *    into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+	struct module *fabric_mod,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(name)) {
+		pr_err("Unable to locate passed fabric name\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
+		pr_err("Passed name: %s exceeds TARGET_FABRIC"
+			"_NAME_SIZE\n", name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+	if (!tf)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&tf->tf_list);
+	atomic_set(&tf->tf_access_cnt, 0);
+	/*
+	 * Setup the default generic struct config_item_type's (cits) in
+	 * struct target_fabric_configfs->tf_cit_tmpl
+	 */
+	tf->tf_module = fabric_mod;
+	target_fabric_setup_cits(tf);
+
+	tf->tf_subsys = target_core_subsystem[0];
+	snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+	mutex_lock(&g_tf_lock);
+	list_add_tail(&tf->tf_list, &g_tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+			">>>>>>>>>>>>>>\n");
+	pr_debug("Initialized struct target_fabric_configfs: %p for"
+			" %s\n", tf, tf->tf_name);
+	return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
+
+/*
+ * Called by fabric plugins after a FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+	struct target_fabric_configfs *tf)
+{
+	mutex_lock(&g_tf_lock);
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+	struct target_fabric_configfs *tf)
+{
+	struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+	if (!tfo->get_fabric_name) {
+		pr_err("Missing tfo->get_fabric_name()\n");
+		return -EINVAL;
+	}
+	if (!tfo->get_fabric_proto_ident) {
+		pr_err("Missing tfo->get_fabric_proto_ident()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_wwn) {
+		pr_err("Missing tfo->tpg_get_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_tag) {
+		pr_err("Missing tfo->tpg_get_tag()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_default_depth) {
+		pr_err("Missing tfo->tpg_get_default_depth()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_pr_transport_id) {
+		pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_pr_transport_id_len) {
+		pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode) {
+		pr_err("Missing tfo->tpg_check_demo_mode()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode_cache) {
+		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_demo_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_check_prod_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_alloc_fabric_acl) {
+		pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_release_fabric_acl) {
+		pr_err("Missing tfo->tpg_release_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!tfo->tpg_get_inst_index) {
+		pr_err("Missing tfo->tpg_get_inst_index()\n");
+		return -EINVAL;
+	}
+	if (!tfo->release_cmd) {
+		pr_err("Missing tfo->release_cmd()\n");
+		return -EINVAL;
+	}
+	if (!tfo->shutdown_session) {
+		pr_err("Missing tfo->shutdown_session()\n");
+		return -EINVAL;
+	}
+	if (!tfo->close_session) {
+		pr_err("Missing tfo->close_session()\n");
+		return -EINVAL;
+	}
+	if (!tfo->sess_get_index) {
+		pr_err("Missing tfo->sess_get_index()\n");
+		return -EINVAL;
+	}
+	if (!tfo->write_pending) {
+		pr_err("Missing tfo->write_pending()\n");
+		return -EINVAL;
+	}
+	if (!tfo->write_pending_status) {
+		pr_err("Missing tfo->write_pending_status()\n");
+		return -EINVAL;
+	}
+	if (!tfo->set_default_node_attributes) {
+		pr_err("Missing tfo->set_default_node_attributes()\n");
+		return -EINVAL;
+	}
+	if (!tfo->get_task_tag) {
+		pr_err("Missing tfo->get_task_tag()\n");
+		return -EINVAL;
+	}
+	if (!tfo->get_cmd_state) {
+		pr_err("Missing tfo->get_cmd_state()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_data_in) {
+		pr_err("Missing tfo->queue_data_in()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_status) {
+		pr_err("Missing tfo->queue_status()\n");
+		return -EINVAL;
+	}
+	if (!tfo->queue_tm_rsp) {
+		pr_err("Missing tfo->queue_tm_rsp()\n");
+		return -EINVAL;
+	}
+	if (!tfo->set_fabric_sense_len) {
+		pr_err("Missing tfo->set_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!tfo->get_fabric_sense_len) {
+		pr_err("Missing tfo->get_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	/*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+	 * target_core_fabric_configfs.c WWN+TPG group context code.
+	 */
+	if (!tfo->fabric_make_wwn) {
+		pr_err("Missing tfo->fabric_make_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_drop_wwn) {
+		pr_err("Missing tfo->fabric_drop_wwn()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_make_tpg) {
+		pr_err("Missing tfo->fabric_make_tpg()\n");
+		return -EINVAL;
+	}
+	if (!tfo->fabric_drop_tpg) {
+		pr_err("Missing tfo->fabric_drop_tpg()\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Called 2nd from fabric module with returned parameter of
+ * struct target_fabric_configfs * from target_fabric_configfs_init().
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned.  Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+	struct target_fabric_configfs *tf)
+{
+	int ret;
+
+	if (!tf) {
+		pr_err("Unable to locate target_fabric_configfs"
+			" pointer\n");
+		return -EINVAL;
+	}
+	if (!tf->tf_subsys) {
+		pr_err("Unable to target struct config_subsystem"
+			" pointer\n");
+		return -EINVAL;
+	}
+	ret = target_fabric_tf_ops_check(tf);
+	if (ret < 0)
+		return ret;
+
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+		">>>>>>>>>>\n");
+	return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
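+
+/*
+ * Minimal sketch of the call sequence a fabric module follows (hypothetical
+ * fabric named "example"; every callback checked by target_fabric_tf_ops_check()
+ * must be assigned before registering):
+ *
+ *	tf = target_fabric_configfs_init(THIS_MODULE, "example");
+ *	if (IS_ERR(tf))
+ *		return PTR_ERR(tf);
+ *	tf->tf_ops.get_fabric_name = example_get_fabric_name;
+ *	... assign the remaining tf_ops callbacks and cit attribute tables ...
+ *	ret = target_fabric_configfs_register(tf);
+ *	if (ret < 0) {
+ *		target_fabric_configfs_free(tf);
+ *		return ret;
+ *	}
+ */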
+
+void target_fabric_configfs_deregister(
+	struct target_fabric_configfs *tf)
+{
+	struct configfs_subsystem *su;
+
+	if (!tf) {
+		pr_err("Unable to locate passed target_fabric_"
+			"configfs\n");
+		return;
+	}
+	su = tf->tf_subsys;
+	if (!su) {
+		pr_err("Unable to locate passed tf->tf_subsys"
+			" pointer\n");
+		return;
+	}
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+			">>>>>>>>>>>>\n");
+	mutex_lock(&g_tf_lock);
+	if (atomic_read(&tf->tf_access_cnt)) {
+		mutex_unlock(&g_tf_lock);
+		pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
+			tf->tf_name);
+		BUG();
+	}
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+			" %s\n", tf->tf_name);
+	tf->tf_module = NULL;
+	tf->tf_subsys = NULL;
+	kfree(tf);
+
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+			">>>>>\n");
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name)					\
+static ssize_t target_core_dev_show_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	char *page)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	ssize_t rb;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!dev) {							\
+		spin_unlock(&se_dev->se_dev_lock); 			\
+		return -ENODEV;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%u\n",				\
+		(u32)dev->se_sub_dev->se_dev_attrib._name);		\
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return rb;							\
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name)					\
+static ssize_t target_core_dev_store_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	unsigned long val;						\
+	int ret;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!dev) {							\
+		spin_unlock(&se_dev->se_dev_lock);			\
+		return -ENODEV;						\
+	}								\
+	ret = strict_strtoul(page, 0, &val);				\
+	if (ret < 0) {							\
+		spin_unlock(&se_dev->se_dev_lock);			\
+		pr_err("strict_strtoul() failed with"		\
+			" ret: %d\n", ret);				\
+		return -EINVAL;						\
+	}								\
+	ret = se_dev_set_##_name(dev, (u32)val);			\
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_DEV_ATTRIB(_name)						\
+DEF_DEV_ATTRIB_SHOW(_name);						\
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name)					\
+DEF_DEV_ATTRIB_SHOW(_name);
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode)					\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_show_attr_##_name,			\
+		target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name);						\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_show_attr_##_name);
+
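+/*
+ * Sketch of what one attribute definition below expands to (hypothetical
+ * backstore names; the files are expected under
+ * /sys/kernel/config/target/core/$HBA/$DEV/attrib/):
+ *
+ *	DEF_DEV_ATTRIB(emulate_dpo);
+ *	SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+ *
+ * emits target_core_dev_show_attr_emulate_dpo()/_store_attr_emulate_dpo()
+ * plus a configfs attribute named "emulate_dpo", so userspace can do e.g.
+ *
+ *	cat /sys/kernel/config/target/core/iblock_0/mydev/attrib/emulate_dpo
+ *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/attrib/emulate_dpo
+ */
+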
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(is_nonrot);
+SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_rest_reord);
+SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(fabric_max_sectors);
+SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_dpo.attr,
+	&target_core_dev_attrib_emulate_fua_write.attr,
+	&target_core_dev_attrib_emulate_fua_read.attr,
+	&target_core_dev_attrib_emulate_write_cache.attr,
+	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&target_core_dev_attrib_emulate_tas.attr,
+	&target_core_dev_attrib_emulate_tpu.attr,
+	&target_core_dev_attrib_emulate_tpws.attr,
+	&target_core_dev_attrib_enforce_pr_isids.attr,
+	&target_core_dev_attrib_is_nonrot.attr,
+	&target_core_dev_attrib_emulate_rest_reord.attr,
+	&target_core_dev_attrib_hw_block_size.attr,
+	&target_core_dev_attrib_block_size.attr,
+	&target_core_dev_attrib_hw_max_sectors.attr,
+	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_fabric_max_sectors.attr,
+	&target_core_dev_attrib_optimal_sectors.attr,
+	&target_core_dev_attrib_hw_queue_depth.attr,
+	&target_core_dev_attrib_queue_depth.attr,
+	&target_core_dev_attrib_max_unmap_lba_count.attr,
+	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
+	&target_core_dev_attrib_unmap_granularity.attr,
+	&target_core_dev_attrib_unmap_granularity_alignment.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+	.show_attribute		= target_core_dev_attrib_attr_show,
+	.store_attribute	= target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+	.ct_item_ops		= &target_core_dev_attrib_ops,
+	.ct_attrs		= target_core_dev_attrib_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode)					\
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_wwn_show_attr_##_name,			\
+		target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name);					\
+do {									\
+	static struct target_core_dev_wwn_attribute			\
+			target_core_dev_wwn_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,				\
+		target_core_dev_wwn_show_attr_##_name);			\
+} while (0);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+
+	dev = se_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+		&t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+	/*
+	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+	 * from the struct scsi_device level firmware, do not allow
+	 * VPD Unit Serial to be emulated.
+	 *
+	 * Note this struct scsi_device could also be emulating VPD
+	 * information from its drivers/scsi LLD.  But for now we assume
+	 * it is doing 'the right thing' wrt a world wide unique
+	 * VPD Unit Serial Number that OS dependent multipath can depend on.
+	 */
+	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+		pr_err("Underlying SCSI device firmware provided VPD"
+			" Unit Serial, ignoring request\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
+		pr_err("Emulated VPD Unit Serial exceeds"
+		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+		return -EOVERFLOW;
+	}
+	/*
+	 * Check to see if any active $FABRIC_MOD exports exist.  If they
+	 * do exist, fail here as changing this information on the fly
+	 * (underneath the initiator side OS dependent multipath code)
+	 * could cause negative effects.
+	 */
+	dev = su_dev->se_dev_ptr;
+	if (dev) {
+		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+			pr_err("Unable to set VPD Unit Serial while"
+				" active %d $FABRIC_MOD exports exist\n",
+				atomic_read(&dev->dev_export_obj.obj_access_count));
+			return -EINVAL;
+		}
+	}
+	/*
+	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+	 *
+	 * Also, strip any newline added by the userspace
+	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+	 */
+	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+			"%s", strstrip(buf));
+	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+			" %s\n", su_dev->t10_wwn.unit_serial);
+
+	return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	struct t10_vpd *vpd;
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	ssize_t len = 0;
+
+	dev = se_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	spin_lock(&t10_wwn->t10_vpd_lock);
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+		if (!vpd->protocol_identifier_set)
+			continue;
+
+		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+		if (len + strlen(buf) >= PAGE_SIZE)
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+	}
+	spin_unlock(&t10_wwn->t10_vpd_lock);
+
+	return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
+static ssize_t target_core_dev_wwn_show_attr_##_name(			\
+	struct t10_wwn *t10_wwn,					\
+	char *page)							\
+{									\
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;		\
+	struct se_device *dev;						\
+	struct t10_vpd *vpd;							\
+	unsigned char buf[VPD_TMP_BUF_SIZE];				\
+	ssize_t len = 0;						\
+									\
+	dev = se_dev->se_dev_ptr;					\
+	if (!dev)							\
+		return -ENODEV;						\
+									\
+	spin_lock(&t10_wwn->t10_vpd_lock);				\
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
+		if (vpd->association != _assoc)				\
+			continue;					\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if (len + strlen(buf) >= PAGE_SIZE)			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+	}								\
+	spin_unlock(&t10_wwn->t10_vpd_lock);				\
+									\
+	return len;							\
+}
+
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_core_dev_wwn_vpd_unit_serial.attr,
+	&target_core_dev_wwn_vpd_protocol_identifier.attr,
+	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+	&target_core_dev_wwn_vpd_assoc_target_port.attr,
+	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+	.show_attribute		= target_core_dev_wwn_attr_show,
+	.store_attribute	= target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+	.ct_item_ops		= &target_core_dev_wwn_ops,
+	.ct_attrs		= target_core_dev_wwn_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_dev_pr_show_attr_##_name,				\
+	target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name);					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+	struct t10_pr_registration *pr_reg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg) {
+		*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+
+	spin_lock(&dev->dev_reservation_lock);
+	se_nacl = dev->dev_reserved_node_acl;
+	if (!se_nacl) {
+		*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+		se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_nacl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	switch (su_dev->t10_pr.res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC2_RESERVATIONS:
+		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC_PASSTHROUGH:
+		len += sprintf(page+len, "Passthrough\n");
+		break;
+	default:
+		len += sprintf(page+len, "Unknown\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	/*
+	 * See the All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
+	 */
+	if (pr_reg->pr_reg_all_tg_pt)
+		len = sprintf(page, "SPC-3 Reservation: All Target"
+			" Ports registration\n");
+	else
+		len = sprintf(page, "SPC-3 Reservation: Single"
+			" Target Port registration\n");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_node_acl *se_nacl;
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg;
+	struct target_core_fabric_ops *tfo;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	se_tpg = se_nacl->se_tpg;
+	lun = pr_reg->pr_reg_tg_pt_lun;
+	tfo = se_tpg->se_tpg_tfo;
+
+	len += sprintf(page+len, "SPC-3 Reservation: %s"
+		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg));
+	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifer Tag: %hu %s Portal Group Tag: %hu"
+		" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+		tfo->get_fabric_name(), lun->unpacked_lun);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct target_core_fabric_ops *tfo;
+	struct t10_pr_registration *pr_reg;
+	unsigned char buf[384];
+	char i_buf[PR_REG_ISID_ID_LEN];
+	ssize_t len = 0;
+	int reg_count = 0, prf_isid;
+
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+	spin_lock(&su_dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+			pr_reg_list) {
+
+		memset(buf, 0, 384);
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+		prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+					PR_REG_ISID_ID_LEN);
+		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+			tfo->get_fabric_name(),
+			pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", pr_reg->pr_res_key,
+			pr_reg->pr_res_generation);
+
+		if (len + strlen(buf) >= PAGE_SIZE)
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+		reg_count++;
+	}
+	spin_unlock(&su_dev->t10_pr.registration_lock);
+
+	if (!reg_count)
+		len += sprintf(page+len, "None\n");
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!pr_reg) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	switch (su_dev->t10_pr.res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+		break;
+	case SPC2_RESERVATIONS:
+		len = sprintf(page, "SPC2_RESERVATIONS\n");
+		break;
+	case SPC_PASSTHROUGH:
+		len = sprintf(page, "SPC_PASSTHROUGH\n");
+		break;
+	default:
+		len = sprintf(page, "UNKNOWN\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "APTPL Bit Status: %s\n",
+		(su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!su_dev->se_dev_ptr)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_initiator_fabric, "initiator_fabric=%s"},
+	{Opt_initiator_node, "initiator_node=%s"},
+	{Opt_initiator_sid, "initiator_sid=%s"},
+	{Opt_sa_res_key, "sa_res_key=%s"},
+	{Opt_res_holder, "res_holder=%d"},
+	{Opt_res_type, "res_type=%d"},
+	{Opt_res_scope, "res_scope=%d"},
+	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+	{Opt_mapped_lun, "mapped_lun=%d"},
+	{Opt_target_fabric, "target_fabric=%s"},
+	{Opt_target_node, "target_node=%s"},
+	{Opt_tpgt, "tpgt=%d"},
+	{Opt_port_rtpi, "port_rtpi=%d"},
+	{Opt_target_lun, "target_lun=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
+	unsigned char *t_fabric = NULL, *t_port = NULL;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long long tmp_ll;
+	u64 sa_res_key = 0;
+	u32 mapped_lun = 0, target_lun = 0;
+	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+	u16 port_rpti = 0, tpgt = 0;
+	u8 type = 0, scope;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_debug("Unable to process APTPL metadata while"
+			" active fabric exports exist\n");
+		return -EINVAL;
+	}
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_initiator_fabric:
+			i_fabric = match_strdup(&args[0]);
+			if (!i_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			break;
+		case Opt_initiator_node:
+			i_port = match_strdup(&args[0]);
+			if (!i_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
+				pr_err("APTPL metadata initiator_node="
+					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+					PR_APTPL_MAX_IPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_initiator_sid:
+			isid = match_strdup(&args[0]);
+			if (!isid) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(isid) >= PR_REG_ISID_LEN) {
+				pr_err("APTPL metadata initiator_isid"
+					"= exceeds PR_REG_ISID_LEN: %d\n",
+					PR_REG_ISID_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_sa_res_key:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			ret = strict_strtoull(arg_p, 0, &tmp_ll);
+			if (ret < 0) {
+				pr_err("strict_strtoull() failed for"
+					" sa_res_key=\n");
+				goto out;
+			}
+			sa_res_key = (u64)tmp_ll;
+			break;
+		/*
+		 * PR APTPL Metadata for Reservation
+		 */
+		case Opt_res_holder:
+			match_int(args, &arg);
+			res_holder = arg;
+			break;
+		case Opt_res_type:
+			match_int(args, &arg);
+			type = (u8)arg;
+			break;
+		case Opt_res_scope:
+			match_int(args, &arg);
+			scope = (u8)arg;
+			break;
+		case Opt_res_all_tg_pt:
+			match_int(args, &arg);
+			all_tg_pt = (int)arg;
+			break;
+		case Opt_mapped_lun:
+			match_int(args, &arg);
+			mapped_lun = (u32)arg;
+			break;
+		/*
+		 * PR APTPL Metadata for Target Port
+		 */
+		case Opt_target_fabric:
+			t_fabric = match_strdup(&args[0]);
+			if (!t_fabric) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			break;
+		case Opt_target_node:
+			t_port = match_strdup(&args[0]);
+			if (!t_port) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
+				pr_err("APTPL metadata target_node="
+					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+					PR_APTPL_MAX_TPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_tpgt:
+			match_int(args, &arg);
+			tpgt = (u16)arg;
+			break;
+		case Opt_port_rtpi:
+			match_int(args, &arg);
+			port_rpti = (u16)arg;
+			break;
+		case Opt_target_lun:
+			match_int(args, &arg);
+			target_lun = (u32)arg;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (res_holder && !(type)) {
+		pr_err("Illegal PR type: 0x%02x for reservation"
+				" holder\n", type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+			res_holder, all_tg_pt, type);
+out:
+	kfree(i_fabric);
+	kfree(i_port);
+	kfree(isid);
+	kfree(t_fabric);
+	kfree(t_port);
+	kfree(orig);
+	return (ret == 0) ? count : ret;
+}
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
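+
+/*
+ * Sketch of restoring one APTPL registration from userspace (hypothetical
+ * iSCSI names; the parser above requires at least initiator_node=,
+ * target_node= and a non-zero sa_res_key=, all passed as a single
+ * comma-separated string):
+ *
+ *	echo "initiator_fabric=iSCSI,initiator_node=$I_IQN,sa_res_key=0x1234,mapped_lun=0,target_fabric=iSCSI,target_node=$T_IQN,tpgt=1,target_lun=0" \
+ *		> /sys/kernel/config/target/core/$HBA/$DEV/pr/res_aptpl_metadata
+ */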
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+	&target_core_dev_pr_res_holder.attr,
+	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
+	&target_core_dev_pr_res_pr_generation.attr,
+	&target_core_dev_pr_res_pr_holder_tg_port.attr,
+	&target_core_dev_pr_res_pr_registered_i_pts.attr,
+	&target_core_dev_pr_res_pr_type.attr,
+	&target_core_dev_pr_res_type.attr,
+	&target_core_dev_pr_res_aptpl_active.attr,
+	&target_core_dev_pr_res_aptpl_metadata.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+	.show_attribute		= target_core_dev_pr_attr_show,
+	.store_attribute	= target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+	.ct_item_ops		= &target_core_dev_pr_ops,
+	.ct_attrs		= target_core_dev_pr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_pr_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	int bl = 0;
+	ssize_t read_bytes = 0;
+
+	if (!se_dev->se_dev_ptr)
+		return -ENODEV;
+
+	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	read_bytes += bl;
+	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "info",
+		    .ca_mode = S_IRUGO },
+	.show	= target_core_show_dev_info,
+	.store	= NULL,
+};
+
+static ssize_t target_core_store_dev_control(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate struct se_subsystem_dev>se"
+				"_dev_su_ptr\n");
+		return -EINVAL;
+	}
+
+	return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "control",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_control,
+};
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_DEV_ALIAS_LEN-1)) {
+		pr_err("alias count: %d exceeds"
+			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+			SE_DEV_ALIAS_LEN-1);
+		return -EINVAL;
+	}
+
+	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+			"%s", page);
+	if (!read_bytes)
+		return -EINVAL;
+	if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
+		se_dev->se_dev_alias[read_bytes - 1] = '\0';
+
+	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+
+	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_alias);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alias",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_alias,
+	.store	= target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_UDEV_PATH_LEN-1)) {
+		pr_err("udev_path count: %d exceeds"
+			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+			SE_UDEV_PATH_LEN-1);
+		return -EINVAL;
+	}
+
+	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+			"%s", page);
+	if (!read_bytes)
+		return -EINVAL;
+	if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
+		se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+
+	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+
+	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_udev_path);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "udev_path",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_udev_path,
+	.store	= target_core_store_dev_udev_path,
+};
+
+static ssize_t target_core_store_dev_enable(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = p;
+	struct se_device *dev;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	char *ptr;
+
+	ptr = strstr(page, "1");
+	if (!ptr) {
+		pr_err("For dev_enable ops, only valid value"
+				" is \"1\"\n");
+		return -EINVAL;
+	}
+	if (se_dev->se_dev_ptr) {
+		pr_err("se_dev->se_dev_ptr already set for storage"
+				" object\n");
+		return -EEXIST;
+	}
+
+	if (t->check_configfs_dev_params(hba, se_dev) < 0)
+		return -EINVAL;
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+	else if (!dev)
+		return -EINVAL;
+
+	se_dev->se_dev_ptr = dev;
+	pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+		" %p\n", se_dev->se_dev_ptr);
+
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "enable",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_enable,
+};
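+
+/*
+ * Typical bring-up of a backstore device from userspace (a sketch; directory
+ * names and the control string depend on the subsystem plugin in use):
+ *
+ *	mkdir -p /sys/kernel/config/target/core/iblock_0/mydev
+ *	echo "udev_path=/dev/sdb" > /sys/kernel/config/target/core/iblock_0/mydev/control
+ *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/enable
+ */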
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = p;
+	struct config_item *lu_ci;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem) {
+		pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		lu_ci = &lu_gp->lu_gp_group.cg_item;
+		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+			config_item_name(lu_ci), lu_gp->lu_gp_id);
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	return len;
+}
+
+static ssize_t target_core_store_alua_lu_gp(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = p;
+	struct se_hba *hba = su_dev->se_dev_hba;
+	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+	int move = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!dev)
+		return -ENODEV;
+
+	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+		pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		return -EINVAL;
+	}
+	if (count > LU_GROUP_NAME_BUF) {
+		pr_err("ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment reference to
+		 * struct t10_alua_lu_gp.  This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!lu_gp_new)
+			return -ENODEV;
+	}
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!lu_gp_mem) {
+		if (lu_gp_new)
+			core_alua_put_lu_gp_from_name(lu_gp_new);
+		pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if (lu_gp) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!lu_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&su_dev->se_dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&su_dev->se_dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alua_lu_gp",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_alua_lu_gp,
+	.store	= target_core_store_alua_lu_gp,
+};
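+
+/*
+ * Sketch of (re)associating an enabled device with an ALUA logical unit
+ * group (hypothetical group name; echoing "NULL" clears the association,
+ * as handled by the store routine above):
+ *
+ *	echo my_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
+ *	echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
+ */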
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+	&target_core_attr_dev_info.attr,
+	&target_core_attr_dev_control.attr,
+	&target_core_attr_dev_alias.attr,
+	&target_core_attr_dev_udev_path.attr,
+	&target_core_attr_dev_enable.attr,
+	&target_core_attr_dev_alua_lu_gp.attr,
+	NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	struct se_subsystem_api *t = hba->transport;
+	struct config_group *dev_cg = &se_dev->se_dev_group;
+
+	kfree(dev_cg->default_groups);
+	/*
+	 * This pointer is set when the storage object is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		pr_debug("Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+			se_dev->se_dev_ptr);
+
+		se_free_virtual_device(se_dev->se_dev_ptr, hba);
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		pr_debug("Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
+			"_dev_t: %p\n", se_dev);
+	kfree(se_dev);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+				     struct configfs_attribute *attr,
+				     char *page)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!tc_attr->show)
+		return -EINVAL;
+
+	return tc_attr->show(se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      const char *page, size_t count)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!tc_attr->store)
+		return -EINVAL;
+
+	return tc_attr->store(se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+	.release		= target_core_dev_release,
+	.show_attribute		= target_core_dev_show,
+	.store_attribute	= target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+	.ct_item_ops		= &target_core_dev_item_ops,
+	.ct_attrs		= lio_core_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_lu_gp_show_attr_##_name,			\
+	target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	if (!lu_gp->lu_gp_valid_id)
+		return 0;
+
+	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	unsigned long lu_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &lu_gp_id);
+	if (ret < 0) {
+		pr_err("strict_strtoul() returned %d for"
+			" lu_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (lu_gp_id > 0x0000ffff) {
+		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", lu_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s to ID: %hu\n",
+		config_item_name(&alua_lu_gp_cg->cg_item),
+		lu_gp->lu_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
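+
+/*
+ * Sketch of creating a logical unit group and assigning its ID from
+ * userspace (hypothetical group name; the lu_gps directory is provided by
+ * target_core_alua_lu_gps_cit further below):
+ *
+ *	mkdir /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp
+ *	echo 1 > /sys/kernel/config/target/core/alua/lu_gps/my_lu_gp/lu_gp_id
+ */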
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_hba *hba;
+	struct se_subsystem_dev *su_dev;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		hba = su_dev->se_dev_hba;
+
+		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			pr_warn("Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+	&target_core_alua_lu_gp_lu_gp_id.attr,
+	&target_core_alua_lu_gp_members.attr,
+	NULL,
+};
+
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release		= target_core_alua_lu_gp_release,
+	.show_attribute		= target_core_alua_lu_gp_attr_show,
+	.store_attribute	= target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+	.ct_item_ops		= &target_core_alua_lu_gp_ops,
+	.ct_attrs		= target_core_alua_lu_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_group *alua_lu_gp_cg = NULL;
+	struct config_item *alua_lu_gp_ci = NULL;
+
+	lu_gp = core_alua_allocate_lu_gp(name, 0);
+	if (IS_ERR(lu_gp))
+		return NULL;
+
+	alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_lu_gp_cg, name,
+			&target_core_alua_lu_gp_cit);
+
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s\n",
+		config_item_name(alua_lu_gp_ci));
+
+	return alua_lu_gp_cg;
+
+}
+
+static void target_core_alua_drop_lu_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s, ID: %hu\n",
+		config_item_name(item), lu_gp->lu_gp_id);
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+	.make_group		= &target_core_alua_create_lu_gp,
+	.drop_item		= &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+	.ct_item_ops		= NULL,
+	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name,			\
+	target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	unsigned long tmp;
+	int new_state, ret;
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to do implict ALUA on non valid"
+			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract new ALUA access state from"
+				" %s\n", page);
+		return -EINVAL;
+	}
+	new_state = (int)tmp;
+
+	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+		pr_err("Unable to process implict configfs ALUA"
+			" transition while TPGS_IMPLICT_ALUA is disabled\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+					NULL, NULL, new_state, 0);
+	return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
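+
+/*
+ * Sketch of an implicit ALUA transition from userspace using the per-device
+ * default_tg_pt_gp (state values follow the SPC ALUA access states, e.g.
+ * 0 = Active/Optimized; TPGS_IMPLICT_ALUA must be enabled in alua_access_type):
+ *
+ *	echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/default_tg_pt_gp/alua_access_state
+ */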
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%s\n",
+		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int new_status, ret;
+
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to set ALUA access status on"
+			" non-valid tg_pt_gp ID: %hu\n",
+			tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract new ALUA access status"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_status = (int)tmp;
+
+	if ((new_status != ALUA_STATUS_NONE) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		pr_err("Illegal ALUA access status: 0x%02x\n",
+				new_status);
+		return -EINVAL;
+	}
+
+	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		pr_err("Unable to extract alua_write_metadata\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		pr_err("Illegal value for alua_write_metadata:"
+			" %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if (!tg_pt_gp->tg_pt_gp_valid_id)
+		return 0;
+
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		pr_err("strict_strtoul() returned %d for"
+			" tg_pt_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	struct se_port *port;
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+			tg_pt_gp_mem_list) {
+		port = tg_pt_gp_mem->tg_pt;
+		tpg = port->sep_tpg;
+		lun = port->sep_lun;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			pr_warn("Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+			tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+	&target_core_alua_tg_pt_gp_alua_access_state.attr,
+	&target_core_alua_tg_pt_gp_alua_access_status.attr,
+	&target_core_alua_tg_pt_gp_alua_access_type.attr,
+	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_preferred.attr,
+	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+	&target_core_alua_tg_pt_gp_members.attr,
+	NULL,
+};
+
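+/*
+ * ->release() for a tg_pt_gp config_item, invoked once the final reference
+ * is dropped from target_core_alua_drop_tg_pt_gp(); this frees the target
+ * port group via core_alua_free_tg_pt_gp().
+ */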
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release		= target_core_alua_tg_pt_gp_release,
+	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
+	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
+	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua *alua = container_of(group, struct t10_alua,
+					alua_tg_pt_gps_group);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+	struct config_group *alua_tg_pt_gp_cg = NULL;
+	struct config_item *alua_tg_pt_gp_ci = NULL;
+
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+	if (!tg_pt_gp)
+		return NULL;
+
+	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_tg_pt_gp_cg, name,
+			&target_core_alua_tg_pt_gp_cit);
+
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s\n",
+		config_item_name(alua_tg_pt_gp_ci));
+
+	return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
+		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+	.make_group		= &target_core_alua_create_tg_pt_gp,
+	.drop_item		= &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+	.ct_group_ops		= &target_core_alua_tg_pt_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+	.ct_item_ops		= NULL,
+	.ct_attrs		= NULL,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_stat_cit */
+
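+/*
+ * The per-device "statistics" group only carries the default groups set up
+ * by target_stat_setup_dev_default_groups(); mkdir(2) returns -ENOSYS and
+ * rmdir(2) is a no-op, so userspace cannot add or remove entries here.
+ */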
+static struct config_group *target_core_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_core_stat_group_ops = {
+	.make_group		= &target_core_stat_mkdir,
+	.drop_item		= &target_core_stat_rmdir,
+};
+
+static struct config_item_type target_core_stat_cit = {
+	.ct_group_ops		= &target_core_stat_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_stat_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *se_dev;
+	struct se_subsystem_api *t;
+	struct config_item *hba_ci = &group->cg_item;
+	struct se_hba *hba = item_to_hba(hba_ci);
+	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+	struct config_group *dev_stat_grp = NULL;
+	int errno = -ENOMEM, ret;
+
+	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
+	if (ret)
+		return ERR_PTR(ret);
+	/*
+	 * Locate the struct se_subsystem_api from parent's struct se_hba.
+	 */
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!se_dev) {
+		pr_err("Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		goto unlock;
+	}
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_pr.registration_lock);
+	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+	se_dev->se_dev_hba = hba;
+	dev_cg = &se_dev->se_dev_group;
+
+	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
+			GFP_KERNEL);
+	if (!dev_cg->default_groups)
+		goto out;
+	/*
+	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+	 * for ->allocate_virtdevice()
+	 *
+	 * se_dev->se_dev_ptr will be set after ->create_virtdev()
+	 * has been called successfully in the next level up in the
+	 * configfs tree for device object's struct config_group.
+	 */
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		goto out;
+	}
+
+	config_group_init_type_name(&se_dev->se_dev_group, name,
+			&target_core_dev_cit);
+	config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+			&target_core_dev_attrib_cit);
+	config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+			&target_core_dev_pr_cit);
+	config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+			&target_core_dev_wwn_cit);
+	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+			"alua", &target_core_alua_tg_pt_gps_cit);
+	config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+			"statistics", &target_core_stat_cit);
+
+	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+	dev_cg->default_groups[5] = NULL;
+	/*
+	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
+	 */
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+	if (!tg_pt_gp)
+		goto out;
+
+	tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!tg_pt_gp_cg->default_groups) {
+		pr_err("Unable to allocate tg_pt_gp_cg->"
+				"default_groups\n");
+		goto out;
+	}
+
+	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+	tg_pt_gp_cg->default_groups[1] = NULL;
+	se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
+	/*
+	 * Add core/$HBA/$DEV/statistics/ default groups
+	 */
+	dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+	dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+				GFP_KERNEL);
+	if (!dev_stat_grp->default_groups) {
+		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
+		goto out;
+	}
+	target_stat_setup_dev_default_groups(se_dev);
+
+	pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+	mutex_unlock(&hba->hba_access_mutex);
+	return &se_dev->se_dev_group;
+out:
+	if (se_dev->t10_alua.default_tg_pt_gp) {
+		core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
+		se_dev->t10_alua.default_tg_pt_gp = NULL;
+	}
+	if (dev_stat_grp)
+		kfree(dev_stat_grp->default_groups);
+	if (tg_pt_gp_cg)
+		kfree(tg_pt_gp_cg->default_groups);
+	if (dev_cg)
+		kfree(dev_cg->default_groups);
+	if (se_dev->se_dev_su_ptr)
+		t->free_device(se_dev->se_dev_su_ptr);
+	kfree(se_dev);
+unlock:
+	mutex_unlock(&hba->hba_access_mutex);
+	return ERR_PTR(errno);
+}
+
+static void target_core_drop_subdev(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba;
+	struct config_item *df_item;
+	struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+	int i;
+
+	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+	mutex_lock(&hba->hba_access_mutex);
+
+	dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+	for (i = 0; dev_stat_grp->default_groups[i]; i++) {
+		df_item = &dev_stat_grp->default_groups[i]->cg_item;
+		dev_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(dev_stat_grp->default_groups);
+
+	tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+		tg_pt_gp_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(tg_pt_gp_cg->default_groups);
+	/*
+	/*
+	 * core_alua_free_tg_pt_gp() is called for the ->default_tg_pt_gp
+	 * group directly from target_core_alua_tg_pt_gp_release().
+	 */
+	se_dev->t10_alua.default_tg_pt_gp = NULL;
+
+	dev_cg = &se_dev->se_dev_group;
+	for (i = 0; dev_cg->default_groups[i]; i++) {
+		df_item = &dev_cg->default_groups[i]->cg_item;
+		dev_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+	 * from target_core_dev_item_ops->release() ->target_core_dev_release().
+	 */
+	config_item_put(item);
+	mutex_unlock(&hba->hba_access_mutex);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+	.make_group		= target_core_make_subdev,
+	.drop_item		= target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode)				\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR(_name, _mode,			\
+		target_core_hba_show_attr_##_name,		\
+		target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name)					\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,			\
+		target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+	struct se_hba *hba,
+	char *page)
+{
+	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+			hba->hba_id, hba->transport->name,
+			TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+				char *page)
+{
+	int hba_mode = 0;
+
+	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+		hba_mode = 1;
+
+	return sprintf(page, "%d\n", hba_mode);
+}
+
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+				const char *page, size_t count)
+{
+	struct se_subsystem_api *transport = hba->transport;
+	unsigned long mode_flag;
+	int ret;
+
+	if (transport->pmode_enable_hba == NULL)
+		return -EINVAL;
+
+	ret = strict_strtoul(page, 0, &mode_flag);
+	if (ret < 0) {
+		pr_err("Unable to extract hba mode flag: %d\n", ret);
+		return -EINVAL;
+	}
+
+	spin_lock(&hba->device_lock);
+	if (!list_empty(&hba->hba_dev_list)) {
+		pr_err("Unable to set hba_mode with active devices\n");
+		spin_unlock(&hba->device_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&hba->device_lock);
+
+	ret = transport->pmode_enable_hba(hba, mode_flag);
+	if (ret < 0)
+		return -EINVAL;
+	if (ret > 0)
+		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+	else if (ret == 0)
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+	return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+				struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+	&target_core_hba_hba_info.attr,
+	&target_core_hba_hba_mode.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+	.release		= target_core_hba_release,
+	.show_attribute		= target_core_hba_attr_show,
+	.store_attribute	= target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+	.ct_item_ops		= &target_core_hba_item_ops,
+	.ct_group_ops		= &target_core_hba_group_ops,
+	.ct_attrs		= target_core_hba_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+	struct config_group *group,
+	const char *name)
+{
+	char *se_plugin_str, *str, *str2;
+	struct se_hba *hba;
+	char buf[TARGET_CORE_NAME_MAX_LEN];
+	unsigned long plugin_dep_id = 0;
+	int ret;
+
+	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
+		pr_err("Passed *name strlen(): %d exceeds"
+			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+			TARGET_CORE_NAME_MAX_LEN);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+	str = strstr(buf, "_");
+	if (!str) {
+		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+		return ERR_PTR(-EINVAL);
+	}
+	se_plugin_str = buf;
+	/*
+	 * Special case for subsystem plugins that have "_" in their names.
+	 * Namely rd_direct and rd_mcp..
+	 */
+	str2 = strstr(str+1, "_");
+	if (str2) {
+		*str2 = '\0'; /* Terminate for *se_plugin_str */
+		str2++; /* Skip to start of plugin dependent ID */
+		str = str2;
+	} else {
+		*str = '\0'; /* Terminate for *se_plugin_str */
+		str++; /* Skip to start of plugin dependent ID */
+	}
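+	/*
+	 * For example, "iblock_0" parses to plugin "iblock" with ID string
+	 * "0", and "rd_mcp_1" parses to plugin "rd_mcp" with ID string "1".
+	 */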
+
+	ret = strict_strtoul(str, 0, &plugin_dep_id);
+	if (ret < 0) {
+		pr_err("strict_strtoul() returned %d for"
+				" plugin_dep_id\n", ret);
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * Load up TCM subsystem plugins if they have not already been loaded.
+	 */
+	transport_subsystem_check_init();
+
+	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+	if (IS_ERR(hba))
+		return ERR_CAST(hba);
+
+	config_group_init_type_name(&hba->hba_group, name,
+			&target_core_hba_cit);
+
+	return &hba->hba_group;
+}
+
+static void target_core_call_delhbafromtarget(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+	.make_group	= target_core_call_addhbatotarget,
+	.drop_item	= target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+	.ct_item_ops	= NULL,
+	.ct_group_ops	= &target_core_group_ops,
+	.ct_attrs	= NULL,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_hba_cit */
+
+static int __init target_core_init_configfs(void)
+{
+	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+	struct config_group *lu_gp_cg = NULL;
+	struct configfs_subsystem *subsys;
+	struct t10_alua_lu_gp *lu_gp;
+	int ret;
+
+	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
+		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
+		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+	subsys = target_core_subsystem[0];
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+
+	ret = init_se_kmem_caches();
+	if (ret < 0)
+		return ret;
+	/*
+	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
+	 */
+	target_cg = &subsys->su_group;
+	target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!target_cg->default_groups) {
+		pr_err("Unable to allocate target_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&target_core_hbagroup,
+			"core", &target_core_cit);
+	target_cg->default_groups[0] = &target_core_hbagroup;
+	target_cg->default_groups[1] = NULL;
+	/*
+	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+	 */
+	hba_cg = &target_core_hbagroup;
+	hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!hba_cg->default_groups) {
+		pr_err("Unable to allocate hba_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+	config_group_init_type_name(&alua_group,
+			"alua", &target_core_alua_cit);
+	hba_cg->default_groups[0] = &alua_group;
+	hba_cg->default_groups[1] = NULL;
+	/*
+	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+	 * groups under /sys/kernel/config/target/core/alua/
+	 */
+	alua_cg = &alua_group;
+	alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!alua_cg->default_groups) {
+		pr_err("Unable to allocate alua_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&alua_lu_gps_group,
+			"lu_gps", &target_core_alua_lu_gps_cit);
+	alua_cg->default_groups[0] = &alua_lu_gps_group;
+	alua_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/alua/lu_gps/default_lu_gp
+	 */
+	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+	if (IS_ERR(lu_gp)) {
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	lu_gp_cg = &alua_lu_gps_group;
+	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!lu_gp_cg->default_groups) {
+		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out_global;
+	}
+
+	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+				&target_core_alua_lu_gp_cit);
+	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+	lu_gp_cg->default_groups[1] = NULL;
+	default_lu_gp = lu_gp;
+	/*
+	 * Register the target_core_mod subsystem with configfs.
+	 */
+	ret = configfs_register_subsystem(subsys);
+	if (ret < 0) {
+		pr_err("Error %d while registering subsystem %s\n",
+			ret, subsys->su_group.cg_item.ci_namebuf);
+		goto out_global;
+	}
+	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
+		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+	/*
+	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
+	 */
+	ret = rd_module_init();
+	if (ret < 0)
+		goto out;
+
+	ret = core_dev_setup_virtual_lun0();
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
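+	/*
+	 * The "out" label unwinds failures that occur after
+	 * configfs_register_subsystem() has succeeded; "out_global" handles
+	 * failures before registration.
+	 */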
+out:
+	configfs_unregister_subsystem(subsys);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+out_global:
+	if (default_lu_gp) {
+		core_alua_free_lu_gp(default_lu_gp);
+		default_lu_gp = NULL;
+	}
+	if (lu_gp_cg)
+		kfree(lu_gp_cg->default_groups);
+	if (alua_cg)
+		kfree(alua_cg->default_groups);
+	if (hba_cg)
+		kfree(hba_cg->default_groups);
+	kfree(target_cg->default_groups);
+	release_se_kmem_caches();
+	return ret;
+}
+
+static void __exit target_core_exit_configfs(void)
+{
+	struct configfs_subsystem *subsys;
+	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+	struct config_item *item;
+	int i;
+
+	subsys = target_core_subsystem[0];
+
+	lu_gp_cg = &alua_lu_gps_group;
+	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+		item = &lu_gp_cg->default_groups[i]->cg_item;
+		lu_gp_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(lu_gp_cg->default_groups);
+	lu_gp_cg->default_groups = NULL;
+
+	alua_cg = &alua_group;
+	for (i = 0; alua_cg->default_groups[i]; i++) {
+		item = &alua_cg->default_groups[i]->cg_item;
+		alua_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
+
+	hba_cg = &target_core_hbagroup;
+	for (i = 0; hba_cg->default_groups[i]; i++) {
+		item = &hba_cg->default_groups[i]->cg_item;
+		hba_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(hba_cg->default_groups);
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
+	configfs_unregister_subsystem(subsys);
+	kfree(subsys->su_group.default_groups);
+
+	core_alua_free_lu_gp(default_lu_gp);
+	default_lu_gp = NULL;
+
+	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
+			" Infrastructure\n");
+
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+	release_se_kmem_caches();
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_device.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_device.c
new file mode 100644
index 0000000..79d9865
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_device.c
@@ -0,0 +1,1698 @@
+/*******************************************************************************
+ * Filename:  target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the TCM Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
+static struct se_hba *lun0_hba;
+static struct se_subsystem_dev *lun0_su_dev;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+{
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_device *dev;
+	unsigned long flags;
+
+	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
+	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		struct se_dev_entry *deve = se_cmd->se_deve;
+
+		deve->total_cmds++;
+		deve->total_bytes += se_cmd->data_length;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+			return -EACCES;
+		}
+
+		if (se_cmd->data_direction == DMA_TO_DEVICE)
+			deve->write_bytes += se_cmd->data_length;
+		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+			deve->read_bytes += se_cmd->data_length;
+
+		deve->deve_cmds++;
+
+		se_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+	}
+	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+
+	if (!se_lun) {
+		/*
+		 * Use the se_portal_group->tpg_virt_lun0 to allow for
+		 * REPORT_LUNS, et al to be returned when no active
+		 * MappedLUN=0 exists for this Initiator Port.
+		 */
+		if (unpacked_lun != 0) {
+			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+				" Access for 0x%08x\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			return -ENODEV;
+		}
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			return -EACCES;
+		}
+
+		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+		se_cmd->orig_fe_lun = 0;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
+	 */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -ENODEV;
+	}
+
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+
+	/* TODO: get rid of this and use atomics for stats */
+	dev = se_lun->lun_se_dev;
+	spin_lock_irqsave(&dev->stats_lock, flags);
+	dev->num_cmds++;
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		dev->write_bytes += se_cmd->data_length;
+	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+		dev->read_bytes += se_cmd->data_length;
+	spin_unlock_irqrestore(&dev->stats_lock, flags);
+
+	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
+	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
+
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+	unsigned long flags;
+
+	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
+	deve = se_cmd->se_deve;
+
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		se_tmr->tmr_lun = deve->se_lun;
+		se_cmd->se_lun = deve->se_lun;
+		se_lun = deve->se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+	}
+	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+
+	if (!se_lun) {
+		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			" Access for 0x%08x\n",
+			se_cmd->se_tfo->get_fabric_name(),
+			unpacked_lun);
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -ENODEV;
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 * FIXME: Check for LUN_RESET + UNIT Attention
+	 */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -ENODEV;
+	}
+
+	/* Directly associate cmd with se_dev */
+	se_cmd->se_dev = se_lun->lun_se_dev;
+	se_tmr->tmr_dev = se_lun->lun_se_dev;
+
+	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+	struct se_node_acl *nacl,
+	u16 rtpi)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_port *port;
+	struct se_portal_group *tpg = nacl->se_tpg;
+	u32 i;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		lun = deve->se_lun;
+		if (!lun) {
+			pr_err("%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				tpg->se_tpg_tfo->get_fabric_name());
+			continue;
+		}
+		port = lun->lun_sep;
+		if (!port) {
+			pr_err("%s device entries port pointer is"
+				" NULL, but Initiator has access.\n",
+				tpg->se_tpg_tfo->get_fabric_name());
+			continue;
+		}
+		if (port->sep_rtpi != rtpi)
+			continue;
+
+		atomic_inc(&deve->pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		return deve;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return NULL;
+}
+
+int core_free_device_list_for_node(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	u32 i;
+
+	if (!nacl->device_list)
+		return 0;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			pr_err("%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				tpg->se_tpg_tfo->get_fabric_name());
+			continue;
+		}
+		lun = deve->se_lun;
+
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+		spin_lock_irq(&nacl->device_list_lock);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
+	nacl->device_list = NULL;
+
+	return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+	unsigned long flags;
+
+	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
+	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve->deve_cmds--;
+	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
+}
+
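+/*
+ * Update the READ_ONLY/READ_WRITE flags of an existing MappedLUN entry
+ * while holding nacl->device_list_lock.
+ */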
+void core_update_device_list_access(
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[mapped_lun];
+	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+	} else {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/*      core_update_device_list_for_node():
+ *
+ *
+ */
+int core_update_device_list_for_node(
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl,
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg,
+	int enable)
+{
+	struct se_port *port = lun->lun_sep;
+	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+	int trans = 0;
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * port->sep_alua_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 */
+	if (!enable) {
+		/*
+		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+		 * struct se_lun_acl, but we remove deve->alua_port_list from
+		 * port->sep_alua_list. This also means that active UAs and
+		 * NodeACL context specific PR metadata for demo-mode
+		 * MappedLUN *deve will be released below..
+		 */
+		spin_lock_bh(&port->sep_alua_lock);
+		list_del(&deve->alua_port_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+	}
+
+	spin_lock_irq(&nacl->device_list_lock);
+	if (enable) {
+		/*
+		 * Check if the call is handling demo mode -> explicit LUN ACL
+		 * transition.  This transition must be for the same struct se_lun
+		 * + mapped_lun that was setup in demo mode..
+		 */
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			if (deve->se_lun_acl != NULL) {
+				pr_err("struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explicit"
+					" LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -EINVAL;
+			}
+			if (deve->se_lun != lun) {
+				pr_err("struct se_dev_entry->se_lun does not"
+					" match passed struct se_lun for demo mode"
+					" -> explicit LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
+				return -EINVAL;
+			}
+			deve->se_lun_acl = lun_acl;
+			trans = 1;
+		} else {
+			deve->se_lun = lun;
+			deve->se_lun_acl = lun_acl;
+			deve->mapped_lun = mapped_lun;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+		}
+
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+
+		if (trans) {
+			spin_unlock_irq(&nacl->device_list_lock);
+			return 0;
+		}
+		deve->creation_time = get_jiffies_64();
+		deve->attach_count++;
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		return 0;
+	}
+	/*
+	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
+	 * PR operation to complete.
+	 */
+	spin_unlock_irq(&nacl->device_list_lock);
+	while (atomic_read(&deve->pr_ref_count) != 0)
+		cpu_relax();
+	spin_lock_irq(&nacl->device_list_lock);
+	/*
+	 * Disable struct se_dev_entry LUN ACL mapping
+	 */
+	core_scsi3_ua_release_all(deve);
+	deve->se_lun = NULL;
+	deve->se_lun_acl = NULL;
+	deve->lun_flags = 0;
+	deve->creation_time = 0;
+	deve->attach_count--;
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+	return 0;
+}
+
+/*      core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+	u32 i;
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+		spin_unlock_irq(&tpg->acl_node_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+			deve = nacl->device_list[i];
+			if (lun != deve->se_lun)
+				continue;
+			spin_unlock_irq(&nacl->device_list_lock);
+
+			core_update_device_list_for_node(lun, NULL,
+				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+				nacl, tpg, 0);
+
+			spin_lock_irq(&nacl->device_list_lock);
+		}
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_irq(&tpg->acl_node_lock);
+	}
+	spin_unlock_irq(&tpg->acl_node_lock);
+}
+
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+	struct se_port *port, *port_tmp;
+
+	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+	if (!port) {
+		pr_err("Unable to allocate struct se_port\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&port->sep_alua_list);
+	INIT_LIST_HEAD(&port->sep_list);
+	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+	spin_lock_init(&port->sep_alua_lock);
+	mutex_init(&port->sep_tg_pt_md_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	if (dev->dev_port_count == 0x0000ffff) {
+		pr_warn("Reached dev->dev_port_count =="
+				" 0x0000ffff\n");
+		spin_unlock(&dev->se_port_lock);
+		return ERR_PTR(-ENOSPC);
+	}
+again:
+	/*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+	 * Here is the table from spc4r17 section 7.7.3.8.
+	 *
+	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+	 *
+	 * Code      Description
+	 * 0h        Reserved
+	 * 1h        Relative port 1, historically known as port A
+	 * 2h        Relative port 2, historically known as port B
+	 * 3h to FFFFh    Relative port 3 through 65 535
+	 */
+	port->sep_rtpi = dev->dev_rpti_counter++;
+	if (!port->sep_rtpi)
+		goto again;
+
+	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+		/*
+		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
+		 * for 16-bit wrap..
+		 */
+		if (port->sep_rtpi == port_tmp->sep_rtpi)
+			goto again;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return port;
+}
+
+static void core_export_port(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_port *port,
+	struct se_lun *lun)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
+	spin_lock(&dev->se_port_lock);
+	spin_lock(&lun->lun_sep_lock);
+	port->sep_tpg = tpg;
+	port->sep_lun = lun;
+	lun->lun_sep = port;
+	spin_unlock(&lun->lun_sep_lock);
+
+	list_add_tail(&port->sep_list, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
+
+	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+			pr_err("Unable to allocate t10_alua_tg_pt"
+					"_gp_member_t\n");
+			return;
+		}
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+			su_dev->t10_alua.default_tg_pt_gp);
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		pr_debug("%s/%s: Adding to default ALUA Target Port"
+			" Group: alua/default_tg_pt_gp\n",
+			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
+	}
+
+	dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ *	Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
+{
+	/*
+	 * Wait for any port reference for PR ALL_TG_PT=1 operation
+	 * to complete in __core_scsi3_alloc_registration()
+	 */
+	spin_unlock(&dev->se_port_lock);
+	if (atomic_read(&port->sep_tg_pt_ref_cnt))
+		cpu_relax();
+	spin_lock(&dev->se_port_lock);
+
+	core_alua_free_tg_pt_gp_mem(port);
+
+	list_del(&port->sep_list);
+	dev->dev_port_count--;
+	kfree(port);
+}
+
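+/*
+ * Export a device as a LUN on the given portal group: allocate a struct
+ * se_port with a unique RTPI, start the device, and attach the port to
+ * the LUN via core_export_port().
+ */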
+int core_dev_export(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port;
+
+	port = core_alloc_port(dev);
+	if (IS_ERR(port))
+		return PTR_ERR(port);
+
+	lun->lun_se_dev = dev;
+	se_dev_start(dev);
+
+	atomic_inc(&dev->dev_export_obj.obj_access_count);
+	core_export_port(dev, tpg, port, lun);
+	return 0;
+}
+
+void core_dev_unexport(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port = lun->lun_sep;
+
+	spin_lock(&lun->lun_sep_lock);
+	if (lun->lun_se_dev == NULL) {
+		spin_unlock(&lun->lun_sep_lock);
+		return;
+	}
+	spin_unlock(&lun->lun_sep_lock);
+
+	spin_lock(&dev->se_port_lock);
+	atomic_dec(&dev->dev_export_obj.obj_access_count);
+	core_release_port(dev, port);
+	spin_unlock(&dev->se_port_lock);
+
+	se_dev_stop(dev);
+	lun->lun_se_dev = NULL;
+}
+
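+/*
+ * Emulate REPORT LUNS: walk the initiator's device list, write one 8-byte
+ * LUN entry per accessible MappedLUN, fill in the LUN LIST LENGTH header,
+ * and complete the task with GOOD status.
+ */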
+int target_report_luns(struct se_task *se_task)
+{
+	struct se_cmd *se_cmd = se_task->task_se_cmd;
+	struct se_dev_entry *deve;
+	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned char *buf;
+	u32 lun_count = 0, offset = 8, i;
+
+	buf = transport_kmap_data_sg(se_cmd);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!se_sess) {
+		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+		lun_count = 1;
+		goto done;
+	}
+
+	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = se_sess->se_node_acl->device_list[i];
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+		/*
+		 * We determine the correct LUN LIST LENGTH even once we
+		 * have reached the initial allocation length.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if ((offset + 8) > se_cmd->data_length)
+			continue;
+
+		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+		offset += 8;
+	}
+	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	lun_count *= 8;
+	buf[0] = ((lun_count >> 24) & 0xff);
+	buf[1] = ((lun_count >> 16) & 0xff);
+	buf[2] = ((lun_count >> 8) & 0xff);
+	buf[3] = (lun_count & 0xff);
+	transport_kunmap_data_sg(se_cmd);
+
+	se_task->task_scsi_status = GOOD;
+	transport_complete_task(se_task, 1);
+	return 0;
+}
+
+/*	se_release_device_for_hba():
+ *
+ *
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+		se_dev_stop(dev);
+
+	if (dev->dev_ptr) {
+		kthread_stop(dev->process_thread);
+		if (dev->transport->free_device)
+			dev->transport->free_device(dev->dev_ptr);
+	}
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	core_scsi3_free_all_registrations(dev);
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev);
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+	struct t10_vpd *vpd, *vpd_tmp;
+
+	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+	list_for_each_entry_safe(vpd, vpd_tmp,
+			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+		list_del(&vpd->vpd_list);
+		kfree(vpd);
+	}
+	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+}
+
+/*	se_free_virtual_device():
+ *
+ *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+	if (!list_empty(&dev->dev_sep_list))
+		dump_stack();
+
+	core_alua_free_lu_gp_mem(dev);
+	se_release_device_for_hba(dev);
+
+	return 0;
+}
+
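+/*
+ * se_dev_start()/se_dev_stop() track dev->dev_obj.obj_access_count: the
+ * first reference flips the device from DEACTIVATED to ACTIVATED (or the
+ * OFFLINE equivalents), and dropping the last reference reverses it.
+ */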
+static void se_dev_start(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_inc(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+			dev->dev_status &=
+				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+static void se_dev_stop(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	spin_lock(&hba->device_lock);
+	atomic_dec(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->dev_status_lock, flags);
+	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+	spin_unlock_irqrestore(&dev->dev_status_lock, flags);
+
+	return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 aligned_max_sectors;
+	u32 alignment;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	alignment = max(1ul, PAGE_SIZE / block_size);
+	aligned_max_sectors = rounddown(max_sectors, alignment);
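+	/*
+	 * For example, with 512-byte blocks on a 4 KiB-page system the
+	 * alignment is 8, so max_sectors is rounded down to a multiple of 8.
+	 */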
+
+	if (max_sectors != aligned_max_sectors)
+		pr_info("Rounding down aligned max_sectors from %u to %u\n",
+			max_sectors, aligned_max_sectors);
+
+	return aligned_max_sectors;
+}
+
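+/*
+ * Populate the per-device attributes with the DA_* compile-time defaults
+ * and with the block_size/max_sectors/queue_depth limits reported by the
+ * subsystem plugin in struct se_dev_limits.
+ */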
+void se_dev_set_default_attribs(
+	struct se_device *dev,
+	struct se_dev_limits *dev_limits)
+{
+	struct queue_limits *limits = &dev_limits->limits;
+
+	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
+	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
+	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+	/*
+	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
+	 * iblock_create_virtdevice() from struct queue_limits values
+	 * if blk_queue_discard()==1
+	 */
+	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+	/*
+	 * block_size is based on subsystem plugin dependent requirements.
+	 */
+	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
+	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
+	/*
+	 * max_sectors is based on subsystem plugin dependent requirements.
+	 */
+	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+						limits->logical_block_size);
+	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+	/*
+	 * Set fabric_max_sectors, which is reported in block limits
+	 * VPD page (B0h).
+	 */
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+	/*
+	 * Set optimal_sectors from fabric_max_sectors, which can be
+	 * lowered via configfs.
+	 */
+	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+	/*
+	 * queue_depth is based on subsystem plugin dependent requirements.
+	 */
+	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
+	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_max_unmap_lba_count(
+	struct se_device *dev,
+	u32 max_unmap_lba_count)
+{
+	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
+			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+	return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+	struct se_device *dev,
+	u32 max_unmap_block_desc_count)
+{
+	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+		max_unmap_block_desc_count;
+	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
+			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity(
+	struct se_device *dev,
+	u32 unmap_granularity)
+{
+	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
+			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+	struct se_device *dev,
+	u32 unmap_granularity_alignment)
+{
+	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
+			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+	return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (flag) {
+		pr_err("dpo_emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (flag && dev->transport->fua_write_emulated == 0) {
+		pr_err("fua_write_emulated not supported\n");
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (flag) {
+		pr_err("fua_read emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+	if (flag != 0 && flag != 1) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	if (flag && dev->transport->write_cache_emulated == 0) {
+		pr_err("write_cache_emulated not supported\n");
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+	return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1) && (flag != 2)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
+			" exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+
+	return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device TAS while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+
+	return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	/*
+	 * Discard support is detected in iblock_create_virtdevice().
+	 * Discard supported is detected iblock_create_virtdevice().
+	 */
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+	return 0;
+}
+
+int se_dev_set_is_nonrot(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
+	       dev, flag);
+	return 0;
+}
+
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
+{
+	if (flag != 0) {
+		pr_err("dev[%p]: SE Device emulation of restricted"
+			" reordering not implemented\n", dev);
+		return -ENOSYS;
+	}
+	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
+	return 0;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device TCQ while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (!queue_depth) {
+		pr_err("dev[%p]: Illegal ZERO value for queue"
+			"_depth\n", dev);
+		return -EINVAL;
+	}
+
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+			pr_err("dev[%p]: Passed queue_depth: %u"
+				" exceeds TCM/SE_Device TCQ: %u\n",
+				dev, queue_depth,
+				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+			return -EINVAL;
+		}
+	} else {
+		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
+			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+				pr_err("dev[%p]: Passed queue_depth:"
+					" %u exceeds TCM/SE_Device MAX"
+					" TCQ: %u\n", dev, queue_depth,
+					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+				return -EINVAL;
+			}
+		}
+	}
+
+	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
+			dev, queue_depth);
+	return 0;
+}
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+	int force = 0; /* Force setting for VDEVS */
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (!max_sectors) {
+		pr_err("dev[%p]: Illegal ZERO value for"
+			" max_sectors\n", dev);
+		return -EINVAL;
+	}
+	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		pr_err("dev[%p]: Passed max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MIN);
+		return -EINVAL;
+	}
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+			pr_err("dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, max_sectors,
+				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+			 return -EINVAL;
+		}
+	} else {
+		if (!force && (max_sectors >
+				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
+			pr_err("dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors"
+				": %u, use force=1 to override.\n", dev,
+				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+			return -EINVAL;
+		}
+		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			pr_err("dev[%p]: Passed max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	max_sectors = se_dev_align_max_sectors(max_sectors,
+				dev->se_sub_dev->se_dev_attrib.block_size);
+
+	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
+	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
+			dev, max_sectors);
+	return 0;
+}
+
+int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
+{
+	int block_size = dev->se_sub_dev->se_dev_attrib.block_size;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" fabric_max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (!fabric_max_sectors) {
+		pr_err("dev[%p]: Illegal ZERO value for"
+			" fabric_max_sectors\n", dev);
+		return -EINVAL;
+	}
+	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
+				DA_STATUS_MAX_SECTORS_MIN);
+		return -EINVAL;
+	}
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, fabric_max_sectors,
+				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+			 return -EINVAL;
+		}
+	} else {
+		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, fabric_max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -EINVAL;
+		}
+	}
+	/*
+	 * Align fabric_max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	if (!block_size) {
+		block_size = 512;
+		pr_warn("Defaulting to 512 for zero block_size\n");
+	}
+	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
+						      block_size);
+
+	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+	pr_debug("dev[%p]: SE Device fabric_max_sectors changed to %u\n",
+			dev, fabric_max_sectors);
+	return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device"
+			" optimal_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		pr_err("dev[%p]: Passed optimal_sectors cannot be"
+				" changed for TCM/pSCSI\n", dev);
+		return -EINVAL;
+	}
+	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+			" greater than fabric_max_sectors: %u\n", dev,
+			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
+		return -EINVAL;
+	}
+
+	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
+			dev, optimal_sectors);
+	return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		pr_err("dev[%p]: Unable to change SE Device block_size"
+			" while dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+
+	if ((block_size != 512) &&
+	    (block_size != 1024) &&
+	    (block_size != 2048) &&
+	    (block_size != 4096)) {
+		pr_err("dev[%p]: Illegal value for block_size: %u"
+			" for SE device, must be 512, 1024, 2048 or 4096\n",
+			dev, block_size);
+		return -EINVAL;
+	}
+
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		pr_err("dev[%p]: Not allowed to change block_size for"
+			" Physical Device, use the Linux/SCSI layer to change"
+			" the block_size of the underlying hardware\n", dev);
+		return -EINVAL;
+	}
+
+	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+			dev, block_size);
+	return 0;
+}
+
+struct se_lun *core_dev_add_lun(
+	struct se_portal_group *tpg,
+	struct se_hba *hba,
+	struct se_device *dev,
+	u32 lun)
+{
+	struct se_lun *lun_p;
+	u32 lun_access = 0;
+	int rc;
+
+	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
+			atomic_read(&dev->dev_access_obj.obj_access_count));
+		return ERR_PTR(-EACCES);
+	}
+
+	lun_p = core_tpg_pre_addlun(tpg, lun);
+	if (IS_ERR(lun_p))
+		return lun_p;
+
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+	if (rc < 0)
+		return ERR_PTR(rc);
+
+	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
+	/*
+	 * Update LUN maps for dynamically added initiators when
+	 * generate_node_acl is enabled.
+	 */
+	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
+		struct se_node_acl *acl;
+		spin_lock_irq(&tpg->acl_node_lock);
+		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+			if (acl->dynamic_node_acl &&
+			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+				spin_unlock_irq(&tpg->acl_node_lock);
+				core_tpg_add_node_to_devs(acl, tpg);
+				spin_lock_irq(&tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_irq(&tpg->acl_node_lock);
+	}
+
+	return lun_p;
+}
+
+/*      core_dev_del_lun():
+ *
+ *
+ */
+int core_dev_del_lun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
+	if (IS_ERR(lun))
+		return PTR_ERR(lun);
+
+	core_tpg_post_dellun(tpg, lun);
+
+	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+		tpg->se_tpg_tfo->get_fabric_name());
+
+	return 0;
+}
+
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+		pr_err("%s Logical Unit Number: %u is not free on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+/*      core_dev_get_lun():
+ *
+ *
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+			"_TPG-1: %u for Target Portal Group: %hu\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		pr_err("%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *nacl,
+	u32 mapped_lun,
+	int *ret)
+{
+	struct se_lun_acl *lacl;
+
+	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
+		pr_err("%s InitiatorName exceeds maximum size.\n",
+			tpg->se_tpg_tfo->get_fabric_name());
+		*ret = -EOVERFLOW;
+		return NULL;
+	}
+	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+	if (!lacl) {
+		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
+		*ret = -ENOMEM;
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&lacl->lacl_list);
+	lacl->mapped_lun = mapped_lun;
+	lacl->se_lun_nacl = nacl;
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+		 nacl->initiatorname);
+
+	return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl,
+	u32 unpacked_lun,
+	u32 lun_access)
+{
+	struct se_lun *lun;
+	struct se_node_acl *nacl;
+
+	lun = core_dev_get_lun(tpg, unpacked_lun);
+	if (!lun) {
+		pr_err("%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		return -EINVAL;
+	}
+
+	nacl = lacl->se_lun_nacl;
+	if (!nacl)
+		return -EINVAL;
+
+	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+	lacl->se_lun = lun;
+
+	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg, 1) < 0)
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+	atomic_inc(&lun->lun_acl_count);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&lun->lun_acl_lock);
+
+	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
+		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lacl->initiatorname);
+	/*
+	 * Check to see if there are any existing persistent reservation APTPL
+	 * pre-registrations that need to be enabled for this LUN ACL..
+	 */
+	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+	return 0;
+}
+
+/*      core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lacl)
+{
+	struct se_node_acl *nacl;
+
+	nacl = lacl->se_lun_nacl;
+	if (!nacl)
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_del(&lacl->lacl_list);
+	atomic_dec(&lun->lun_acl_count);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&lun->lun_acl_lock);
+
+	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+	lacl->se_lun = NULL;
+
+	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+		" InitiatorNode: %s Mapped LUN: %u\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+		lacl->initiatorname, lacl->mapped_lun);
+
+	return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl)
+{
+	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg),
+		tpg->se_tpg_tfo->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun);
+
+	kfree(lacl);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+	struct se_hba *hba;
+	struct se_device *dev;
+	struct se_subsystem_dev *se_dev = NULL;
+	struct se_subsystem_api *t;
+	char buf[16];
+	int ret;
+
+	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
+	if (IS_ERR(hba))
+		return PTR_ERR(hba);
+
+	lun0_hba = hba;
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!se_dev) {
+		pr_err("Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_pr.registration_lock);
+	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+	se_dev->se_dev_hba = hba;
+
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	lun0_su_dev = se_dev;
+
+	memset(buf, 0, 16);
+	sprintf(buf, "rd_pages=8");
+	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		goto out;
+	}
+	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+	se_dev->se_dev_ptr = dev;
+	g_lun0_dev = dev;
+
+	return 0;
+out:
+	lun0_su_dev = NULL;
+	kfree(se_dev);
+	if (lun0_hba) {
+		core_delete_hba(lun0_hba);
+		lun0_hba = NULL;
+	}
+	return ret;
+}
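+
+/*
+ * Note (informational): the device created above is backed by the built-in
+ * rd_mcp ramdisk plugin using "rd_pages=8", i.e. roughly 8 * PAGE_SIZE
+ * (32KiB with 4K pages) of backing store.  It backs the default virtual
+ * LUN 0 so initiators always have a LUN 0 to address before any real
+ * device has been exported (sizing figures are illustrative and depend on
+ * PAGE_SIZE).
+ */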
+
+
+void core_dev_release_virtual_lun0(void)
+{
+	struct se_hba *hba = lun0_hba;
+	struct se_subsystem_dev *su_dev = lun0_su_dev;
+
+	if (!hba)
+		return;
+
+	if (g_lun0_dev)
+		se_free_virtual_device(g_lun0_dev, hba);
+
+	kfree(su_dev);
+	core_delete_hba(hba);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_configfs.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 0000000..6b79ee7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,1223 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010,2011 Rising Tide Systems
+ * Copyright (c) 2010,2011 Linux-iSCSI.org
+ *
+ * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
+	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tf->tf_module;					\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
+}
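+
+/*
+ * Illustrative expansion of the macro above:
+ * TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL) generates
+ * target_fabric_setup_tpg_lun_cit(), which fills in
+ * tf->tf_cit_tmpl.tfc_tpg_lun_cit with the given item ops, group ops and
+ * attrs and sets ->ct_owner to the fabric module.  target_fabric_setup_cits()
+ * at the bottom of this file invokes one such helper per config_item_type.
+ */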
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg;
+	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+	int ret = 0, lun_access;
+
+	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+			" %p to struct lun: %p\n", lun_ci, lun);
+		return -EFAULT;
+	}
+	/*
+	 * Ensure that the source port exists
+	 */
+	if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
+		pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
+				"_tpg does not exist\n");
+		return -EINVAL;
+	}
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+	tpg_ci = &nacl_ci->ci_group->cg_item;
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+	/*
+	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+	 */
+	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+		pr_err("Illegal Initiator ACL SymLink outside of %s\n",
+			config_item_name(wwn_ci));
+		return -EINVAL;
+	}
+	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+		pr_err("Illegal Initiator ACL Symlink outside of %s"
+			" TPGT: %s\n", config_item_name(wwn_ci),
+			config_item_name(tpg_ci));
+		return -EINVAL;
+	}
+	/*
+	 * If this struct se_node_acl was dynamically generated with
+	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which will be write protected (READ-ONLY) when
+	 * tpg_1/attrib/demo_mode_write_protect=1
+	 */
+	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+	deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+		lun_access = deve->lun_flags;
+	else
+		lun_access =
+			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
+				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+					   TRANSPORT_LUNFLAGS_READ_WRITE;
+	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+	/*
+	 * Determine the actual mapped LUN value the user wants.
+	 *
+	 * This is the value the SCSI Initiator actually sees for
+	 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
+	 */
+	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+			lun->unpacked_lun, lun_access);
+
+	return (ret < 0) ? -EINVAL : 0;
+}
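+
+/*
+ * Usage note (illustrative fabric/WWN names): this ->allow_link() handler
+ * runs when userspace symlinks a TPG LUN into a MappedLUN directory, e.g.
+ *
+ *   cd /sys/kernel/config/target/iscsi/$IQN/tpgt_1
+ *   ln -s lun/lun_0 acls/$INITIATOR_IQN/lun_0
+ *
+ * so lun_ci is the TPG LUN and lun_acl_ci the per-initiator mapped LUN.
+ */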
+
+static int target_fabric_mappedlun_unlink(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_lun *lun;
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
+	struct se_portal_group *se_tpg;
+	/*
+	 * Determine if the underlying MappedLUN has already been released..
+	 */
+	if (!deve->se_lun)
+		return 0;
+
+	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+	return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode)				\
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_mappedlun_show_##_name,				\
+	target_fabric_mappedlun_store_##_name);
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+	struct se_lun_acl *lacl,
+	char *page)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t len;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = se_nacl->device_list[lacl->mapped_lun];
+	len = sprintf(page, "%d\n",
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+			1 : 0);
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+	struct se_lun_acl *lacl,
+	const char *page,
+	size_t count)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	unsigned long op;
+
+	if (strict_strtoul(page, 0, &op))
+		return -EINVAL;
+
+	if ((op != 1) && (op != 0))
+		return -EINVAL;
+
+	core_update_device_list_access(lacl->mapped_lun, (op) ?
+			TRANSPORT_LUNFLAGS_READ_ONLY :
+			TRANSPORT_LUNFLAGS_READ_WRITE,
+			lacl->se_lun_nacl);
+
+	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
+		" Mapped LUN: %u Write Protect bit to %s\n",
+		se_tpg->se_tpg_tfo->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+	return count;
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+				struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+	&target_fabric_mappedlun_write_protect.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release		= target_fabric_mappedlun_release,
+	.show_attribute		= target_fabric_mappedlun_attr_show,
+	.store_attribute	= target_fabric_mappedlun_attr_store,
+	.allow_link		= target_fabric_mappedlun_link,
+	.drop_link		= target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+		target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_mappedlun_stat_cit */
+
+static struct config_group *target_core_mappedlun_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_mappedlun_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {
+	.make_group		= target_core_mappedlun_stat_mkdir,
+	.drop_item		= target_core_mappedlun_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
+		NULL);
+
+/* End of tfc_tpg_mappedlun_stat_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+	.show_attribute		= target_fabric_nacl_attrib_attr_show,
+	.store_attribute	= target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+	.show_attribute		= target_fabric_nacl_auth_attr_show,
+	.store_attribute	= target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+	.show_attribute		= target_fabric_nacl_param_attr_show,
+	.store_attribute	= target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl = container_of(group,
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_lun_acl *lacl;
+	struct config_item *acl_ci;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+	char *buf;
+	unsigned long mapped_lun;
+	int ret = 0;
+
+	acl_ci = &group->cg_item;
+	if (!acl_ci) {
+		pr_err("Unable to locate acl_ci\n");
+		return NULL;
+	}
+
+	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate memory for name buf\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	snprintf(buf, strlen(name) + 1, "%s", name);
+	/*
+	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+	 */
+	if (strstr(buf, "lun_") != buf) {
+		pr_err("Unable to locate \"lun_\" from buf: %s"
+			" name: %s\n", buf, name);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Determine the Mapped LUN value.  This is what the SCSI Initiator
+	 * Port will actually see.
+	 */
+	if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n", mapped_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
+			mapped_lun, &ret);
+	if (!lacl) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl_cg = &lacl->se_lun_group;
+	lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!lacl_cg->default_groups) {
+		pr_err("Unable to allocate lacl_cg->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	config_group_init_type_name(&lacl->se_lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+	config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
+			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
+	lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
+	lacl_cg->default_groups[1] = NULL;
+
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+	ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+				GFP_KERNEL);
+	if (!ml_stat_grp->default_groups) {
+		pr_err("Unable to allocate ml_stat_grp->default_groups\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	target_stat_setup_mappedlun_default_groups(lacl);
+
+	kfree(buf);
+	return &lacl->se_lun_group;
+out:
+	if (lacl_cg)
+		kfree(lacl_cg->default_groups);
+	kfree(buf);
+	return ERR_PTR(ret);
+}
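+
+/*
+ * Usage note (illustrative): the MappedLUN group itself is created by a
+ * mkdir in the initiator ACL directory, e.g.
+ *
+ *   mkdir /sys/kernel/config/target/iscsi/$IQN/tpgt_1/acls/$INITIATOR_IQN/lun_0
+ *
+ * The "lun_$ID" suffix parsed above becomes lacl->mapped_lun, the LUN
+ * number this initiator will actually see.
+ */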
+
+static void target_fabric_drop_mappedlun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct config_item *df_item;
+	struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+	int i;
+
+	ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+	for (i = 0; ml_stat_grp->default_groups[i]; i++) {
+		df_item = &ml_stat_grp->default_groups[i]->cg_item;
+		ml_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(ml_stat_grp->default_groups);
+
+	lacl_cg = &lacl->se_lun_group;
+	for (i = 0; lacl_cg->default_groups[i]; i++) {
+		df_item = &lacl_cg->default_groups[i]->cg_item;
+		lacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lacl_cg->default_groups);
+
+	config_item_put(item);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release		= target_fabric_nacl_base_release,
+	.show_attribute		= target_fabric_nacl_base_attr_show,
+	.store_attribute	= target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+	.make_group		= target_fabric_make_mappedlun,
+	.drop_item		= target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+		&target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_stat_cit */
+/*
+ * This is used as a placeholder for struct se_node_acl->acl_fabric_stat_group
+ * to allow fabrics access to ->acl_fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL);
+
+/* End of tfc_tpg_nacl_stat_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl;
+	struct config_group *nacl_cg;
+
+	if (!tf->tf_ops.fabric_make_nodeacl) {
+		pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+	if (IS_ERR(se_nacl))
+		return ERR_CAST(se_nacl);
+
+	nacl_cg = &se_nacl->acl_group;
+	nacl_cg->default_groups = se_nacl->acl_default_groups;
+	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+	nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
+	nacl_cg->default_groups[4] = NULL;
+
+	config_group_init_type_name(&se_nacl->acl_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+	config_group_init_type_name(&se_nacl->acl_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+	config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
+			"fabric_statistics",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
+
+	return &se_nacl->acl_group;
+}
+
+static void target_fabric_drop_nodeacl(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct config_item *df_item;
+	struct config_group *nacl_cg;
+	int i;
+
+	nacl_cg = &se_nacl->acl_group;
+	for (i = 0; nacl_cg->default_groups[i]; i++) {
+		df_item = &nacl_cg->default_groups[i]->cg_item;
+		nacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+	.make_group	= target_fabric_make_nodeacl,
+	.drop_item	= target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release		= target_fabric_np_base_release,
+	.show_attribute		= target_fabric_np_base_attr_show,
+	.store_attribute	= target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np;
+
+	if (!tf->tf_ops.fabric_make_np) {
+		pr_err("tf->tf_ops.fabric_make_np is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+	if (!se_tpg_np || IS_ERR(se_tpg_np))
+		return ERR_PTR(-EINVAL);
+
+	se_tpg_np->tpg_np_parent = se_tpg;
+	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+	return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+	struct config_group *group,
+	struct config_item *item)
+{
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+	.make_group	= &target_fabric_make_np,
+	.drop_item	= &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode)					\
+static struct target_fabric_port_attribute target_fabric_port_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_port_show_attr_##_name,				\
+	target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTR_RO(_name)					\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!lun || !lun->lun_sep)
+		return -ENODEV;
+
+	return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+	&target_fabric_port_alua_tg_pt_gp.attr,
+	&target_fabric_port_alua_tg_pt_offline.attr,
+	&target_fabric_port_alua_tg_pt_status.attr,
+	&target_fabric_port_alua_tg_pt_write_md.attr,
+	NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct config_item *tpg_ci;
+	struct se_device *dev;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_lun *lun_p;
+	struct se_portal_group *se_tpg;
+	struct se_subsystem_dev *se_dev = container_of(
+				to_config_group(se_dev_ci), struct se_subsystem_dev,
+				se_dev_group);
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+	se_tpg = container_of(to_config_group(tpg_ci),
+				struct se_portal_group, tpg_group);
+	tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (lun->lun_se_dev != NULL) {
+		pr_err("Port Symlink already exists\n");
+		return -EEXIST;
+	}
+
+	dev = se_dev->se_dev_ptr;
+	if (!dev) {
+		pr_err("Unable to locate struct se_device pointer from"
+			" %s\n", config_item_name(se_dev_ci));
+		ret = -ENODEV;
+		goto out;
+	}
+	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+			" %p to struct se_device: %p\n", se_dev_ci, dev);
+		return -EFAULT;
+	}
+
+	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+				lun->unpacked_lun);
+	if (IS_ERR(lun_p)) {
+		pr_err("core_dev_add_lun() failed\n");
+		ret = PTR_ERR(lun_p);
+		goto out;
+	}
+
+	if (tf->tf_ops.fabric_post_link) {
+		/*
+		 * Call the optional fabric_post_link() to allow a
+		 * fabric module to setup any additional state once
+		 * core_dev_add_lun() has been called..
+		 */
+		tf->tf_ops.fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
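+
+/*
+ * Usage note (illustrative names): a backstore device is exported through a
+ * TPG LUN by symlinking it into the LUN directory, e.g.
+ *
+ *   ln -s /sys/kernel/config/target/core/iblock_0/my_dev \
+ *         /sys/kernel/config/target/iscsi/$IQN/tpgt_1/lun/lun_0/my_dev
+ *
+ * which resolves the struct se_device behind se_dev_ci and calls
+ * core_dev_add_lun() above.
+ */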
+
+static int target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops.fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun->unpacked_lun);
+	return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.show_attribute		= target_fabric_port_attr_show,
+	.store_attribute	= target_fabric_port_attr_store,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_port_stat_cit */
+
+static struct config_group *target_core_port_stat_mkdir(
+	struct config_group *group,
+	const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static void target_core_port_stat_rmdir(
+	struct config_group *group,
+	struct config_item *item)
+{
+	return;
+}
+
+static struct configfs_group_operations target_fabric_port_stat_group_ops = {
+	.make_group		= target_core_port_stat_mkdir,
+	.drop_item		= target_core_port_stat_rmdir,
+};
+
+TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL);
+
+/* End of tfc_tpg_port_stat_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
+	unsigned long unpacked_lun;
+	int errno;
+
+	if (strstr(name, "lun_") != name) {
+		pr_err("Unable to locate \"lun_\" prefix in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+	if (!lun)
+		return ERR_PTR(-EINVAL);
+
+	lun_cg = &lun->lun_group;
+	lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!lun_cg->default_groups) {
+		pr_err("Unable to allocate lun_cg->default_groups\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.stat_group,
+			"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
+	lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
+	lun_cg->default_groups[1] = NULL;
+
+	port_stat_grp = &lun->port_stat_grps.stat_group;
+	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group) * 3,
+				GFP_KERNEL);
+	if (!port_stat_grp->default_groups) {
+		pr_err("Unable to allocate port_stat_grp->default_groups\n");
+		errno = -ENOMEM;
+		goto out;
+	}
+	target_stat_setup_port_default_groups(lun);
+
+	return &lun->lun_group;
+out:
+	if (lun_cg)
+		kfree(lun_cg->default_groups);
+	return ERR_PTR(errno);
+}
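+
+/*
+ * Usage note (illustrative): the LUN directory itself comes from a mkdir
+ * under the TPG, e.g. "mkdir .../tpgt_1/lun/lun_0".  The name must match
+ * "lun_$LUN_NUMBER" as checked above, and a nested statistics/ group is
+ * created automatically.
+ */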
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun *lun = container_of(to_config_group(item),
+				struct se_lun, lun_group);
+	struct config_item *df_item;
+	struct config_group *lun_cg, *port_stat_grp;
+	int i;
+
+	port_stat_grp = &lun->port_stat_grps.stat_group;
+	for (i = 0; port_stat_grp->default_groups[i]; i++) {
+		df_item = &port_stat_grp->default_groups[i]->cg_item;
+		port_stat_grp->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(port_stat_grp->default_groups);
+
+	lun_cg = &lun->lun_group;
+	for (i = 0; lun_cg->default_groups[i]; i++) {
+		df_item = &lun_cg->default_groups[i]->cg_item;
+		lun_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(lun_cg->default_groups);
+
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+	.show_attribute		= target_fabric_tpg_attrib_attr_show,
+	.store_attribute	= target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+	.show_attribute		= target_fabric_tpg_param_attr_show,
+	.store_attribute	= target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release		= target_fabric_tpg_release,
+	.show_attribute		= target_fabric_tpg_attr_show,
+	.store_attribute	= target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg;
+
+	if (!tf->tf_ops.fabric_make_tpg) {
+		pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+	if (!se_tpg || IS_ERR(se_tpg))
+		return ERR_PTR(-EINVAL);
+	/*
+	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+	 */
+	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+	se_tpg->tpg_group.default_groups[5] = NULL;
+
+	config_group_init_type_name(&se_tpg->tpg_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+	return &se_tpg->tpg_group;
+}
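+
+/*
+ * Usage note (illustrative): a TPG is created with e.g.
+ * "mkdir /sys/kernel/config/target/$FABRIC/$WWN/tpgt_1"; naming of the TPG
+ * directory is fabric specific.  The default groups wired up above mean the
+ * new directory is automatically populated with lun/, np/, acls/, attrib/
+ * and param/ subdirectories.
+ */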
+
+static void target_fabric_drop_tpg(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+				struct se_portal_group, tpg_group);
+	struct config_group *tpg_cg = &se_tpg->tpg_group;
+	struct config_item *df_item;
+	int i;
+	/*
+	 * Release default groups, but do not release tpg_cg->default_groups
+	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
+	 */
+	for (i = 0; tpg_cg->default_groups[i]; i++) {
+		df_item = &tpg_cg->default_groups[i]->cg_item;
+		tpg_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+}
+
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release	= target_fabric_release_wwn,
+};
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+	.make_group	= target_fabric_make_tpg,
+	.drop_item	= target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_fabric_stats_cit */
+/*
+ * This is used as a placeholder for struct se_wwn->fabric_stat_group
+ * to allow fabrics access to ->fabric_stat_group->default_groups[]
+ */
+TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
+
+/* End of tfc_wwn_fabric_stats_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn;
+
+	if (!tf->tf_ops.fabric_make_wwn) {
+		pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+	if (!wwn || IS_ERR(wwn))
+		return ERR_PTR(-EINVAL);
+
+	wwn->wwn_tf = tf;
+	/*
+	 * Setup default groups from pre-allocated wwn->wwn_default_groups
+	 */
+	wwn->wwn_group.default_groups = wwn->wwn_default_groups;
+	wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
+	wwn->wwn_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&wwn->wwn_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+	config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
+			&TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
+
+	return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct config_item *df_item;
+	struct config_group *cg = &wwn->wwn_group;
+	int i;
+
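+	/*
+	 * Release default groups; ->default_groups memory is static in
+	 * wwn->wwn_default_groups, so only drop the item references.
+	 */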
+	for (i = 0; cg->default_groups[i]; i++) {
+		df_item = &cg->default_groups[i]->cg_item;
+		cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+	.make_group	= target_fabric_make_wwn,
+	.drop_item	= target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+	.show_attribute		= target_fabric_wwn_attr_show,
+	.store_attribute	= target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+		tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+	.show_attribute		= target_fabric_discovery_attr_show,
+	.store_attribute	= target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+	target_fabric_setup_discovery_cit(tf);
+	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_wwn_fabric_stats_cit(tf);
+	target_fabric_setup_tpg_cit(tf);
+	target_fabric_setup_tpg_base_cit(tf);
+	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_port_stat_cit(tf);
+	target_fabric_setup_tpg_lun_cit(tf);
+	target_fabric_setup_tpg_np_cit(tf);
+	target_fabric_setup_tpg_np_base_cit(tf);
+	target_fabric_setup_tpg_attrib_cit(tf);
+	target_fabric_setup_tpg_param_cit(tf);
+	target_fabric_setup_tpg_nacl_cit(tf);
+	target_fabric_setup_tpg_nacl_base_cit(tf);
+	target_fabric_setup_tpg_nacl_attrib_cit(tf);
+	target_fabric_setup_tpg_nacl_auth_cit(tf);
+	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_nacl_stat_cit(tf);
+	target_fabric_setup_tpg_mappedlun_cit(tf);
+	target_fabric_setup_tpg_mappedlun_stat_cit(tf);
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_lib.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 0000000..283a36e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,452 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
+	 * This is defined in section 7.5.1 Table 362 in spc4r17
+	 */
+	return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char *ptr;
+	int ret;
+
+	/*
+	 * Set PROTOCOL IDENTIFIER to 6h for SAS
+	 */
+	buf[0] = 0x06;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 */
+	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa.' prefix */
+
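+	/* Convert the 16 hex chars of the NAA WWN into 8 binary bytes at buf[4] */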
+	ret = hex2bin(&buf[4], ptr, 8);
+	if (ret < 0)
+		pr_debug("sas transport_id: invalid hex string\n");
+
+	/*
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
+
+u32 sas_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 *
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
+	 *
+	 * The TransportID for a SAS Initiator Port has a fixed size of
+	 * 24 bytes, and SAS does not contain an I_T nexus identifier,
+	 * so *port_nexus_ptr is set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char *ptr;
+	int i, ret;
+	u32 off = 8;
+
+	/*
+	 * PROTOCOL IDENTIFIER is 0h for FCP-2
+	 *
+	 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+	 * SCSI over Fibre Channel
+	 *
+	 * We convert the ASCII formatted N Port name into a binary
+	 * encoded TransportID.
+	 */
+	ptr = &se_nacl->initiatorname[0];
+
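+	/* Walk the colon-separated WWPN string, packing each hex digit pair */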
+	for (i = 0; i < 24; ) {
+		if (!strncmp(&ptr[i], ":", 1)) {
+			i++;
+			continue;
+		}
+		ret = hex2bin(&buf[off++], &ptr[i], 1);
+		if (ret < 0)
+			pr_debug("fc transport_id: invalid hex string\n");
+		i += 2;
+	}
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
+
+char *fc_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * The TransportID for an FC N Port has a fixed size of
+	 * 24 bytes, and FC does not contain an I_T nexus identifier,
+	 * so *port_nexus_ptr is set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	 return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This value is defined for "Internet SCSI (iSCSI)"
+	 * in spc4r17 section 7.5.1 Table 362
+	 */
+	return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	u32 off = 4, padding = 0;
+	u16 len = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+	 */
+	buf[0] = 0x05;
+	/*
+	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+	 * ports using SCSI over iSCSI.
+	 *
+	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+	 * shall contain the iSCSI name of an iSCSI initiator node (see
+	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+	 * null character terminates the ISCSI NAME field without regard for
+	 * the specified length of the iSCSI TransportID or the contents of
+	 * the ADDITIONAL LENGTH field.
+	 */
+	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+	/*
+	 * Add Extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration and
+	 * *format_code == 1, use the iSCSI Initiator port TransportID format.
+	 *
+	 * Otherwise use the iSCSI Initiator device TransportID format, which
+	 * does not contain the ASCII encoded iSCSI Initiator ISID value
+	 * provided by the iSCSI Initiator during the iSCSI login process.
+	 */
+	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+		/*
+		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+		 * format.
+		 */
+		buf[0] |= 0x40;
+		/*
+		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+		 * ports using SCSI over iSCSI.  Table 390
+		 *
+		 * The SEPARATOR field shall contain the five ASCII
+		 * characters ",i,0x".
+		 *
+		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+		 * field shall contain the iSCSI initiator session identifier
+		 * (see RFC 3720) in the form of ASCII characters that are the
+		 * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character terminates
+		 * the ISCSI INITIATOR SESSION ID field.
+		 */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+		len += 5;
+		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+		buf[off+len] = '\0'; off++;
+		len += 7;
+	}
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
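+	/* ((-len) & 3) gives the pad bytes needed to reach a multiple of four */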
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff);
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
+
+u32 iscsi_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	u32 len = 0, padding = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	len = strlen(se_nacl->initiatorname);
+	/*
+	 * Add extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration, use format code:
+	 * 01b: iSCSI Initiator port TransportID format
+	 *
+	 * If there is not an active iSCSI session, use format code:
+	 * 00b: iSCSI Initiator device TransportID format
+	 */
+	if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+		*format_code = 1;
+	} else
+		*format_code = 0;
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
+
+char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	u32 tid_len, padding;
+	int i;
+	u16 add_len;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 *       TransportID for initiator ports using SCSI over iSCSI,
+	 *       from Table 388 -- iSCSI TransportID formats.
+	 *
+	 *    00b     Initiator port is identified using the world wide unique
+	 *            SCSI device name of the iSCSI initiator
+	 *            device containing the initiator port (see table 389).
+	 *    01b     Initiator port is identified using the world wide unique
+	 *            initiator port identifier (see table 390).10b to 11b
+	 *            Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		pr_err("Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI Transport ID now.
+	 */
+	if (out_tid_len != NULL) {
+		add_len = ((buf[2] >> 8) & 0xff);
+		add_len |= (buf[3] & 0xff);
+
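+		/* Recompute the length from the NUL-terminated iSCSI name itself */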
+		tid_len = strlen(&buf[4]);
+		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+		tid_len += 1; /* Add one byte for NULL terminator */
+		padding = ((-tid_len) & 3);
+		if (padding != 0)
+			tid_len += padding;
+
+		if ((add_len + 4) != tid_len) {
+			pr_debug("LIO-Target Extracted add_len: %hu "
+				"does not match calculated tid_len: %u,"
+				" using tid_len instead\n", add_len+4, tid_len);
+			*out_tid_len = tid_len;
+		} else
+			*out_tid_len = (add_len + 4);
+	}
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr(&buf[4], ",i,0x");
+		if (!p) {
+			pr_err("Unable to locate \",i,0x\" seperator"
+				" for Initiator port identifier: %s\n",
+				&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	}
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.c
new file mode 100644
index 0000000..f44459a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.c
@@ -0,0 +1,658 @@
+/*******************************************************************************
+ * Filename:  target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_file.h"
+
+static struct se_subsystem_api fileio_template;
+
+/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!fd_host) {
+		pr_err("Unable to allocate memory for struct fd_host\n");
+		return -ENOMEM;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	hba->hba_ptr = fd_host;
+
+	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_MOD_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+		" MaxSectors: %u\n",
+		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!fd_dev) {
+		pr_err("Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
+
+	return fd_dev;
+}
+
+/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	char *dev_p = NULL;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct queue_limits *limits;
+	struct fd_dev *fd_dev = p;
+	struct fd_host *fd_host = hba->hba_ptr;
+	mm_segment_t old_fs;
+	struct file *file;
+	struct inode *inode = NULL;
+	int dev_flags = 0, flags, ret = -EINVAL;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	dev_p = getname(fd_dev->fd_dev_name);
+	set_fs(old_fs);
+
+	if (IS_ERR(dev_p)) {
+		pr_err("getname(%s) failed: %lu\n",
+			fd_dev->fd_dev_name, IS_ERR(dev_p));
+		ret = PTR_ERR(dev_p);
+		goto fail;
+	}
+	/*
+	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+	 * of pure timestamp updates.
+	 */
+	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+	/*
+	 * Optionally allow fd_buffered_io=1 to be enabled for people
+	 * who want to use the fs buffer cache as a WriteCache mechanism.
+	 *
+	 * This means that in the event of a hard failure, there is a risk
+	 * of silent data loss if the SCSI client has *not* performed a
+	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
+	 * to write-out the entire device cache.
+	 */
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
+		flags &= ~O_DSYNC;
+	}
+
+	file = filp_open(dev_p, flags, 0600);
+	if (IS_ERR(file)) {
+		pr_err("filp_open(%s) failed\n", dev_p);
+		ret = PTR_ERR(file);
+		goto fail;
+	}
+	if (!file || !file->f_dentry) {
+		pr_err("filp_open(%s) failed\n", dev_p);
+		goto fail;
+	}
+	fd_dev->fd_file = file;
+	/*
+	 * If using a block backend with this struct file, we extract
+	 * fd_dev->fd_[block,dev]_size from struct block_device.
+	 *
+	 * Otherwise, we use the passed fd_size= from configfs
+	 */
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		struct request_queue *q;
+		unsigned long long dev_size;
+		/*
+		 * Setup the local scope queue_limits from struct request_queue->limits
+		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+		 */
+		q = bdev_get_queue(inode->i_bdev);
+		limits = &dev_limits.limits;
+		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+		limits->max_hw_sectors = queue_max_hw_sectors(q);
+		limits->max_sectors = queue_max_sectors(q);
+		/*
+		 * Determine the number of bytes from i_size_read() minus
+		 * one (1) logical sector from underlying struct block_device
+		 */
+		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		dev_size = (i_size_read(file->f_mapping->host) -
+				       fd_dev->fd_block_size);
+
+		pr_debug("FILEIO: Using size: %llu bytes from struct"
+			" block_device blocks: %llu logical_block_size: %d\n",
+			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
+			fd_dev->fd_block_size);
+	} else {
+		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+			pr_err("FILEIO: Missing fd_dev_size="
+				" parameter, and no backing struct"
+				" block_device\n");
+			goto fail;
+		}
+
+		limits = &dev_limits.limits;
+		limits->logical_block_size = FD_BLOCKSIZE;
+		limits->max_hw_sectors = FD_MAX_SECTORS;
+		limits->max_sectors = FD_MAX_SECTORS;
+		fd_dev->fd_block_size = FD_BLOCKSIZE;
+	}
+
+	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba, &fileio_template,
+				se_dev, dev_flags, fd_dev,
+				&dev_limits, "FILEIO", FD_VERSION);
+	if (!dev)
+		goto fail;
+
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
+			" with FDBD_HAS_BUFFERED_IO_WCE\n");
+		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+	}
+
+	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+	fd_dev->fd_queue_depth = dev->queue_depth;
+
+	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+	putname(dev_p);
+	return dev;
+fail:
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	putname(dev_p);
+	return ERR_PTR(ret);
+}
+
+/*	fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+	struct fd_dev *fd_dev = p;
+
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+
+	kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+	return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(unsigned char *cdb)
+{
+	struct fd_request *fd_req;
+
+	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+	if (!fd_req) {
+		pr_err("Unable to allocate struct fd_request\n");
+		return NULL;
+	}
+
+	return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct fd_dev *dev = se_dev->dev_ptr;
+	struct file *fd = dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba *
+		      se_dev->se_sub_dev->se_dev_attrib.block_size);
+	int ret = 0, i;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	if (!iov) {
+		pr_err("Unable to allocate fd_do_readv iov[]\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		iov[i].iov_len = sg->length;
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+	}
+
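+	/* Switch to KERNEL_DS so vfs_readv() accepts the kernel-space iovecs */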
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+	set_fs(old_fs);
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
+		kunmap(sg_page(sg));
+	kfree(iov);
+	/*
+	 * Return zeros and GOOD status even if the READ did not return
+	 * the expected virt_size for struct file w/o a backing struct
+	 * block_device.
+	 */
+	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+		if (ret < 0 || ret != task->task_size) {
+			pr_err("vfs_readv() returned %d,"
+				" expecting %d for S_ISBLK\n", ret,
+				(int)task->task_size);
+			return (ret < 0 ? ret : -EINVAL);
+		}
+	} else {
+		if (ret < 0) {
+			pr_err("vfs_readv() returned %d for non"
+				" S_ISBLK\n", ret);
+			return ret;
+		}
+	}
+
+	return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
+	struct fd_dev *dev = se_dev->dev_ptr;
+	struct file *fd = dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba *
+		      se_dev->se_sub_dev->se_dev_attrib.block_size);
+	int ret, i = 0;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	if (!iov) {
+		pr_err("Unable to allocate fd_do_writev iov[]\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		iov[i].iov_len = sg->length;
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+	}
+
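+	/* As in fd_do_readv(): let vfs_writev() take kernel-space iovecs */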
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+	set_fs(old_fs);
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
+		kunmap(sg_page(sg));
+
+	kfree(iov);
+
+	if (ret < 0 || ret != task->task_size) {
+		pr_err("vfs_writev() returned %d\n", ret);
+		return (ret < 0 ? ret : -EINVAL);
+	}
+
+	return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length;
+		else
+			end = LLONG_MAX;
+	}
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	/*
+	 * Call vectorized fileio functions to map struct scatterlist
+	 * physical memory addresses to struct iovec virtual memory.
+	 */
+	if (task->task_data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(task);
+	} else {
+		ret = fd_do_writev(task);
+		/*
+		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
+		 * Allow this to happen independent of WCE=0 setting.
+		 */
+		if (ret > 0 &&
+		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		    (cmd->se_cmd_flags & SCF_FUA)) {
+			struct fd_dev *fd_dev = dev->dev_ptr;
+			loff_t start = task->task_lba *
+				dev->se_sub_dev->se_dev_attrib.block_size;
+			loff_t end = start + task->task_size;
+
+			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+		}
+	}
+
+	if (ret < 0) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return ret;
+	}
+	if (ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return 0;
+}
+
+/*	fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	kfree(req);
+}
+
+enum {
+	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_fd_dev_name, "fd_dev_name=%s"},
+	{Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t fd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page, ssize_t count)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_fd_dev_name:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+					"%s", arg_p);
+			kfree(arg_p);
+			pr_debug("FILEIO: Referencing Path: %s\n",
+					fd_dev->fd_dev_name);
+			fd_dev->fbd_flags |= FBDF_HAS_PATH;
+			break;
+		case Opt_fd_dev_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			kfree(arg_p);
+			if (ret < 0) {
+				pr_err("strict_strtoull() failed for"
+						" fd_dev_size=\n");
+				goto out;
+			}
+			pr_debug("FILEIO: Referencing Size: %llu"
+					" bytes\n", fd_dev->fd_dev_size);
+			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+			break;
+		case Opt_fd_buffered_io:
+			match_int(args, &arg);
+			if (arg != 1) {
+				pr_err("bogus fd_buffered_io=%d value\n", arg);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			pr_debug("FILEIO: Using buffered I/O"
+				" operations for struct fd_dev\n");
+
+			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		pr_err("Missing fd_dev_name=\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
+		"Buffered-WCE" : "O_DSYNC");
+	return bl;
+}
+
+/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	struct file *f = fd_dev->fd_file;
+	struct inode *i = f->f_mapping->host;
+	unsigned long long dev_size;
+	/*
+	 * When using a file that references an underlying struct block_device,
+	 * ensure dev_size is always based on the current inode size in order
+	 * to handle underlying block_device resize operations.
+	 */
+	if (S_ISBLK(i->i_mode))
+		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+	else
+		dev_size = fd_dev->fd_dev_size;
+
+	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+}
+
+static struct se_subsystem_api fileio_template = {
+	.name			= "fileio",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.write_cache_emulated	= 1,
+	.fua_write_emulated	= 1,
+	.attach_hba		= fd_attach_hba,
+	.detach_hba		= fd_detach_hba,
+	.allocate_virtdevice	= fd_allocate_virtdevice,
+	.create_virtdevice	= fd_create_virtdevice,
+	.free_device		= fd_free_device,
+	.alloc_task		= fd_alloc_task,
+	.do_task		= fd_do_task,
+	.do_sync_cache		= fd_emulate_sync_cache,
+	.free_task		= fd_free_task,
+	.check_configfs_dev_params = fd_check_configfs_dev_params,
+	.set_configfs_dev_params = fd_set_configfs_dev_params,
+	.show_configfs_dev_params = fd_show_configfs_dev_params,
+	.get_device_rev		= fd_get_device_rev,
+	.get_device_type	= fd_get_device_type,
+	.get_blocks		= fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+	return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+	transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.h
new file mode 100644
index 0000000..6b1b6a9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_file.h
@@ -0,0 +1,44 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION		"4.0"
+
+#define FD_MAX_DEV_NAME		256
+#define FD_DEVICE_QUEUE_DEPTH	32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE		512
+#define FD_MAX_SECTORS		1024
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct fd_request {
+	struct se_task	fd_task;
+};
+
+#define FBDF_HAS_PATH		0x01
+#define FBDF_HAS_SIZE		0x02
+#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+struct fd_dev {
+	u32		fbd_flags;
+	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+	u32		fd_dev_id;
+	/* Number of SG tables in sg_table_array */
+	u32		fd_table_count;
+	u32		fd_queue_depth;
+	u32		fd_block_size;
+	unsigned long long fd_dev_size;
+	struct file	*fd_file;
+	/* FILEIO HBA device is connected to */
+	struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+	u32		fd_host_dev_id_count;
+	/* Unique FILEIO Host ID */
+	u32		fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_hba.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_hba.c
new file mode 100644
index 0000000..3dd1bd4
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_hba.c
@@ -0,0 +1,173 @@
+/*******************************************************************************
+ * Filename:  target_core_hba.c
+ *
+ * This file contains the TCM HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+	struct se_subsystem_api *s;
+
+	INIT_LIST_HEAD(&sub_api->sub_api_list);
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!strcmp(s->name, sub_api->name)) {
+			pr_err("%p is already registered with"
+				" duplicate name %s, unable to process"
+				" request\n", s, s->name);
+			mutex_unlock(&subsystem_mutex);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+	mutex_unlock(&subsystem_mutex);
+
+	pr_debug("TCM: Registered subsystem plugin: %s struct module:"
+			" %p\n", sub_api->name, sub_api->owner);
+	return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+	mutex_lock(&subsystem_mutex);
+	list_del(&sub_api->sub_api_list);
+	mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+	struct se_subsystem_api *s;
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!strcmp(s->name, sub_name))
+			goto found;
+	}
+	mutex_unlock(&subsystem_mutex);
+	return NULL;
+found:
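+	/* Pin the backend module before returning; on failure hand back NULL */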
+	if (s->owner && !try_module_get(s->owner))
+		s = NULL;
+	mutex_unlock(&subsystem_mutex);
+	return s;
+}
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+	struct se_hba *hba;
+	int ret = 0;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		pr_err("Unable to allocate struct se_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&hba->hba_dev_list);
+	spin_lock_init(&hba->device_lock);
+	mutex_init(&hba->hba_access_mutex);
+
+	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+	hba->hba_flags |= hba_flags;
+
+	hba->transport = core_get_backend(plugin_name);
+	if (!hba->transport) {
+		ret = -EINVAL;
+		goto out_free_hba;
+	}
+
+	ret = hba->transport->attach_hba(hba, plugin_dep_id);
+	if (ret < 0)
+		goto out_module_put;
+
+	spin_lock(&hba_lock);
+	hba->hba_id = hba_id_counter++;
+	list_add_tail(&hba->hba_node, &hba_list);
+	spin_unlock(&hba_lock);
+
+	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
+			" Core\n", hba->hba_id);
+
+	return hba;
+
+out_module_put:
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+	hba->transport = NULL;
+out_free_hba:
+	kfree(hba);
+	return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+	if (!list_empty(&hba->hba_dev_list))
+		dump_stack();
+
+	hba->transport->detach_hba(hba);
+
+	spin_lock(&hba_lock);
+	list_del(&hba->hba_node);
+	spin_unlock(&hba_lock);
+
+	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
+			" Core\n", hba->hba_id);
+
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+
+	hba->transport = NULL;
+	kfree(hba);
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.c
new file mode 100644
index 0000000..2ec299e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.c
@@ -0,0 +1,694 @@
+/*******************************************************************************
+ * Filename:  target_core_iblock.c
+ *
+ * This file contains the Storage Engine  <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_iblock.h"
+
+#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
+#define IBLOCK_BIO_POOL_SIZE	128
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+	return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct iblock_dev *ib_dev = NULL;
+
+	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+	if (!ib_dev) {
+		pr_err("Unable to allocate struct iblock_dev\n");
+		return NULL;
+	}
+
+	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
+
+	return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct iblock_dev *ib_dev = p;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct block_device *bd = NULL;
+	struct request_queue *q;
+	struct queue_limits *limits;
+	u32 dev_flags = 0;
+	int ret = -EINVAL;
+
+	if (!ib_dev) {
+		pr_err("Unable to locate struct iblock_dev parameter\n");
+		return ERR_PTR(ret);
+	}
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
+	if (!ib_dev->ibd_bio_set) {
+		pr_err("IBLOCK: Unable to create bioset()\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	pr_debug("IBLOCK: Created bio_set()\n");
+	/*
+	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
+	 */
+	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
+			ib_dev->ibd_udev_path);
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+	if (IS_ERR(bd)) {
+		ret = PTR_ERR(bd);
+		goto failed;
+	}
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = bdev_get_queue(bd);
+	limits = &dev_limits.limits;
+	limits->logical_block_size = bdev_logical_block_size(bd);
+	limits->max_hw_sectors = UINT_MAX;
+	limits->max_sectors = UINT_MAX;
+	dev_limits.hw_queue_depth = q->nr_requests;
+	dev_limits.queue_depth = q->nr_requests;
+
+	ib_dev->ibd_bd = bd;
+
+	dev = transport_add_device_to_core_hba(hba,
+			&iblock_template, se_dev, dev_flags, ib_dev,
+			&dev_limits, "IBLOCK", IBLOCK_VERSION);
+	if (!dev)
+		goto failed;
+
+	/*
+	 * Check if the underlying struct block_device request_queue supports
+	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+	 * in ATA; if so, we need to set TPE=1.
+	 */
+	if (blk_queue_discard(q)) {
+		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+				q->limits.max_discard_sectors;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code..
+		 */
+		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+				q->limits.discard_granularity >> 9;
+		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+				q->limits.discard_alignment;
+
+		pr_debug("IBLOCK: BLOCK Discard support available,"
+				" disabled by default\n");
+	}
+
+	if (blk_queue_nonrot(q))
+		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+
+	return dev;
+
+failed:
+	if (ib_dev->ibd_bio_set) {
+		bioset_free(ib_dev->ibd_bio_set);
+		ib_dev->ibd_bio_set = NULL;
+	}
+	ib_dev->ibd_bd = NULL;
+	return ERR_PTR(ret);
+}
+
+static void iblock_free_device(void *p)
+{
+	struct iblock_dev *ib_dev = p;
+
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
+	kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+	return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(unsigned char *cdb)
+{
+	struct iblock_req *ib_req;
+
+	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!ib_req) {
+		pr_err("Unable to allocate memory for struct iblock_req\n");
+		return NULL;
+	}
+
+	atomic_set(&ib_req->pending, 1);
+	return &ib_req->ib_task;
+}
+
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+	struct se_device *dev,
+	struct block_device *bd,
+	struct request_queue *q)
+{
+	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+					bdev_logical_block_size(bd)) - 1);
+	u32 block_size = bdev_logical_block_size(bd);
+
+	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+		return blocks_long;
+
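+	/* Rescale the block count between the device and emulated block sizes */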
+	switch (block_size) {
+	case 4096:
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		case 2048:
+			blocks_long <<= 1;
+			break;
+		case 1024:
+			blocks_long <<= 2;
+			break;
+		case 512:
+			blocks_long <<= 3;
+		default:
+			break;
+		}
+		break;
+	case 2048:
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 1;
+			break;
+		case 1024:
+			blocks_long <<= 1;
+			break;
+		case 512:
+			blocks_long <<= 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1024:
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 2;
+			break;
+		case 2048:
+			blocks_long >>= 1;
+			break;
+		case 512:
+			blocks_long <<= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 512:
+		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+		case 4096:
+			blocks_long >>= 3;
+			break;
+		case 2048:
+			blocks_long >>= 2;
+			break;
+		case 1024:
+			blocks_long >>= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return blocks_long;
+}
+
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (err)
+		pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+	if (cmd)
+		transport_complete_sync_cache(cmd, err == 0);
+	bio_put(bio);
+}
+
+/*
+ * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	struct bio *bio;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op.
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
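+	/* An empty bio submitted with WRITE_FLUSH flushes the whole device cache */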
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
+	if (!immed)
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	int barrier = 0;
+
+	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+	kfree(IBLOCK_REQ(task));
+}
+
+enum {
+	Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_udev_path, "udev_path=%s"},
+	{Opt_force, "force=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+					       struct se_subsystem_dev *se_dev,
+					       const char *page, ssize_t count)
+{
+	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_udev_path:
+			if (ib_dev->ibd_bd) {
+				pr_err("Unable to set udev_path= while"
+					" ib_dev->ibd_bd exists\n");
+				ret = -EEXIST;
+				goto out;
+			}
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+					"%s", arg_p);
+			kfree(arg_p);
+			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_force:
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	char buf[BDEVNAME_SIZE];
+	ssize_t bl = 0;
+
+	if (bd)
+		bl += sprintf(b + bl, "iBlock device: %s",
+				bdevname(bd, buf));
+	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
+				ibd->ibd_udev_path);
+	} else
+		bl += sprintf(b + bl, "\n");
+
+	bl += sprintf(b + bl, "        ");
+	if (bd) {
+		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
+			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
+			"" : (bd->bd_holder == ibd) ?
+			"CLAIMED: IBLOCK" : "CLAIMED: OS");
+	} else {
+		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
+	}
+
+	return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+
+	bio_free(bio, ib_dev->ibd_bio_set);
+}
+
+static struct bio *
+iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to;
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = task;
+	bio->bi_destructor = iblock_bio_destructor;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
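+	/* One pending reference per allocated bio; dropped in iblock_bio_done() */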
+	atomic_inc(&ib_req->pending);
+
+	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+	pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
+	struct bio *bio;
+	struct bio_list list;
+	struct scatterlist *sg;
+	u32 i, sg_num = task->task_sg_nents;
+	sector_t block_lba;
+	unsigned bio_cnt;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		     (cmd->se_cmd_flags & SCF_FUA)))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
+	/*
+	 * Do starting conversion up from non 512-byte blocksize with
+	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 */
+	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+		block_lba = (task->task_lba << 3);
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+		block_lba = (task->task_lba << 2);
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+		block_lba = (task->task_lba << 1);
+	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+		block_lba = task->task_lba;
+	else {
+		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOSYS;
+	}
+
+	bio = iblock_get_bio(task, block_lba, sg_num);
+	if (!bio) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
+	}
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
+	bio_cnt = 1;
+
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		/*
+		 * XXX: if the length the device accepts is shorter than the
+		 *	length of the S/G list entry this will cause an
+		 *	endless loop.  Better hope no driver uses huge pages.
+		 */
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
+				iblock_submit_bios(&list, rw);
+				bio_cnt = 0;
+			}
+
+			bio = iblock_get_bio(task, block_lba, sg_num);
+			if (!bio)
+				goto fail;
+			bio_list_add(&list, bio);
+			bio_cnt++;
+		}
+
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sg_num--;
+	}
+
+	iblock_submit_bios(&list, rw);
+
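+	/*
+	 * Drop the initial reference set in iblock_alloc_task(); if every bio
+	 * has already completed, finish the task here.
+	 */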
+	if (atomic_dec_and_test(&ibr->pending)) {
+		transport_complete_task(task,
+				!atomic_read(&ibr->ib_bio_err_cnt));
+	}
+	return 0;
+
+fail:
+	while ((bio = bio_list_pop(&list)))
+		bio_put(bio);
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -ENOMEM;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
+
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+	 */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+		err = -EIO;
+
+	if (err != 0) {
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+	}
+
+	bio_put(bio);
+
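+	/* Only the completion of the last outstanding BIO completes the task. */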
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		 task, bio, task->task_lba,
+		 (unsigned long long)bio->bi_sector, err);
+
+	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
+}
+
+static struct se_subsystem_api iblock_template = {
+	.name			= "iblock",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.write_cache_emulated	= 1,
+	.fua_write_emulated	= 1,
+	.attach_hba		= iblock_attach_hba,
+	.detach_hba		= iblock_detach_hba,
+	.allocate_virtdevice	= iblock_allocate_virtdevice,
+	.create_virtdevice	= iblock_create_virtdevice,
+	.free_device		= iblock_free_device,
+	.alloc_task		= iblock_alloc_task,
+	.do_task		= iblock_do_task,
+	.do_discard		= iblock_do_discard,
+	.do_sync_cache		= iblock_emulate_sync_cache,
+	.free_task		= iblock_free_task,
+	.check_configfs_dev_params = iblock_check_configfs_dev_params,
+	.set_configfs_dev_params = iblock_set_configfs_dev_params,
+	.show_configfs_dev_params = iblock_show_configfs_dev_params,
+	.get_device_rev		= iblock_get_device_rev,
+	.get_device_type	= iblock_get_device_type,
+	.get_blocks		= iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+	return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+	transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.h
new file mode 100644
index 0000000..e929370
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_iblock.h
@@ -0,0 +1,24 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION		"4.0"
+
+#define IBLOCK_MAX_CDBS		16
+#define IBLOCK_LBA_SHIFT	9
+
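+/*
+ * Per-task state: 'pending' tracks outstanding BIOs for the task and
+ * 'ib_bio_err_cnt' counts BIO completions that reported an error.
+ */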
+struct iblock_req {
+	struct se_task ib_task;
+	atomic_t pending;
+	atomic_t ib_bio_err_cnt;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH		0x01
+
+struct iblock_dev {
+	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+	u32	ibd_flags;
+	struct bio_set	*ibd_bio_set;
+	struct block_device *ibd_bd;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_internal.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_internal.h
new file mode 100644
index 0000000..17179b1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_internal.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_cdb.c */
+int	target_emulate_inquiry(struct se_task *task);
+int	target_emulate_readcapacity(struct se_task *task);
+int	target_emulate_readcapacity_16(struct se_task *task);
+int	target_emulate_modesense(struct se_task *task);
+int	target_emulate_request_sense(struct se_task *task);
+int	target_emulate_unmap(struct se_task *task);
+int	target_emulate_write_same(struct se_task *task);
+int	target_emulate_synchronize_cache(struct se_task *task);
+int	target_emulate_noop(struct se_task *task);
+
+/* target_core_device.c */
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+int	core_free_device_list_for_node(struct se_node_acl *,
+		struct se_portal_group *);
+void	core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+void	core_update_device_list_access(u32, u32, struct se_node_acl *);
+int	core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+		u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int	core_dev_export(struct se_device *, struct se_portal_group *,
+		struct se_lun *);
+void	core_dev_unexport(struct se_device *, struct se_portal_group *,
+		struct se_lun *);
+int	target_report_luns(struct se_task *);
+void	se_release_device_for_hba(struct se_device *);
+void	se_release_vpd_for_dev(struct se_device *);
+int	se_free_virtual_device(struct se_device *, struct se_hba *);
+int	se_dev_check_online(struct se_device *);
+int	se_dev_check_shutdown(struct se_device *);
+void	se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+int	se_dev_set_task_timeout(struct se_device *, u32);
+int	se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int	se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int	se_dev_set_unmap_granularity(struct se_device *, u32);
+int	se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int	se_dev_set_emulate_dpo(struct se_device *, int);
+int	se_dev_set_emulate_fua_write(struct se_device *, int);
+int	se_dev_set_emulate_fua_read(struct se_device *, int);
+int	se_dev_set_emulate_write_cache(struct se_device *, int);
+int	se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int	se_dev_set_emulate_tas(struct se_device *, int);
+int	se_dev_set_emulate_tpu(struct se_device *, int);
+int	se_dev_set_emulate_tpws(struct se_device *, int);
+int	se_dev_set_enforce_pr_isids(struct se_device *, int);
+int	se_dev_set_is_nonrot(struct se_device *, int);
+int	se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int	se_dev_set_queue_depth(struct se_device *, u32);
+int	se_dev_set_max_sectors(struct se_device *, u32);
+int	se_dev_set_fabric_max_sectors(struct se_device *, u32);
+int	se_dev_set_optimal_sectors(struct se_device *, u32);
+int	se_dev_set_block_size(struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+		struct se_device *, u32);
+int	core_dev_del_lun(struct se_portal_group *, u32);
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_node_acl *, u32, int *);
+int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *, u32, u32);
+int	core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun *, struct se_lun_acl *);
+void	core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+		struct se_lun_acl *lacl);
+int	core_dev_setup_virtual_lun0(void);
+void	core_dev_release_virtual_lun0(void);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int	core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+void	core_tmr_abort_task(struct se_device *, struct se_tmr_req *,
+			struct se_session *);
+int	core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+		struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+		const char *);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+		unsigned char *);
+void	core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void	core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+int	core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
+		u32, void *);
+struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
+int	core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int	init_se_kmem_caches(void);
+void	release_se_kmem_caches(void);
+u32	scsi_get_new_index(scsi_index_t);
+void	transport_subsystem_check_init(void);
+void	transport_cmd_finish_abort(struct se_cmd *, int);
+void	__transport_remove_task_from_execute_queue(struct se_task *,
+		struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void	transport_dump_dev_state(struct se_device *, char *, int *);
+void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+		unsigned long long, char *, int *);
+void	transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool	target_stop_task(struct se_task *task, unsigned long *flags);
+int	transport_clear_lun_from_sessions(struct se_lun *);
+void	transport_send_task_abort(struct se_cmd *);
+
+/* target_core_stat.c */
+void	target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void	target_stat_setup_port_default_groups(struct se_lun *);
+void	target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.c
new file mode 100644
index 0000000..89d10e6
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.c
@@ -0,0 +1,4408 @@
+/*******************************************************************************
+ * Filename:  target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+	int dest_local_nexus;
+	struct t10_pr_registration *dest_pr_reg;
+	struct se_portal_group *dest_tpg;
+	struct se_node_acl *dest_node_acl;
+	struct se_dev_entry *dest_se_deve;
+	struct list_head dest_list;
+};
+
+int core_pr_dump_initiator_port(
+	struct t10_pr_registration *pr_reg,
+	char *buf,
+	u32 size)
+{
+	if (!pr_reg->isid_present_at_reg)
+		return 0;
+
+	snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+	return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+			struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	switch (cdb[0]) {
+	case INQUIRY:
+	case RELEASE:
+	case RELEASE_10:
+		return 0;
+	default:
+		return 1;
+	}
+
+	return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!sess)
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -EINVAL;
+	}
+	if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+					struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
+	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
+	int conflict = 0;
+
+	if (!crh)
+		return -EINVAL;
+
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+			se_sess);
+	if (pr_reg) {
+		/*
+		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+		 * behavior
+		 *
+		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+		 * status, but no reservation shall be established and the
+		 * persistent reservation shall not be changed, if the command
+		 * is received from a) and b) below.
+		 *
+		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+		 * status, but the persistent reservation shall not be released,
+		 * if the command is received from a) and b)
+		 *
+		 * a) An I_T nexus that is a persistent reservation holder; or
+		 * b) An I_T nexus that is registered if a registrants only or
+		 *    all registrants type persistent reservation is present.
+		 *
+		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+		 * RELEASE(6) command, or RELEASE(10) command shall be processed
+		 * as defined in SPC-2.
+		 */
+		if (pr_reg->pr_res_holder) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 1;
+		}
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 1;
+		}
+		core_scsi3_put_pr_reg(pr_reg);
+		conflict = 1;
+	} else {
+		/*
+		 * Following spc2r20 5.5.1 Reservations overview:
+		 *
+		 * If a logical unit has executed a PERSISTENT RESERVE OUT
+		 * command with the REGISTER or the REGISTER AND IGNORE
+		 * EXISTING KEY service action and is still registered by any
+		 * initiator, all RESERVE commands and all RELEASE commands
+		 * regardless of initiator shall conflict and shall terminate
+		 * with a RESERVATION CONFLICT status.
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	if (conflict) {
+		pr_err("Received legacy SPC-2 RESERVE/RELEASE"
+			" while active SPC-3 registrations exist,"
+			" returning RESERVATION_CONFLICT\n");
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int target_scsi2_reservation_release(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+	int ret = 0, rc;
+
+	if (!sess || !tpg)
+		goto out;
+	rc = target_check_scsi2_reservation_conflict(cmd);
+	if (rc == 1)
+		goto out;
+	else if (rc < 0) {
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = 0;
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess)
+		goto out_unlock;
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl)
+		goto out_unlock;
+
+	if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+		goto out_unlock;
+
+	dev->dev_reserved_node_acl = NULL;
+	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+		dev->dev_res_bin_isid = 0;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
+		" MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+
+out_unlock:
+	spin_unlock(&dev->dev_reservation_lock);
+out:
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+int target_scsi2_reservation_reserve(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+	int ret = 0, rc;
+
+	if ((cmd->t_task_cdb[1] & 0x01) &&
+	    (cmd->t_task_cdb[1] & 0x02)) {
+		pr_err("LongIO and Obsolete Bits set, returning"
+				" ILLEGAL_REQUEST\n");
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * This is currently the case for target_core_mod passthrough struct se_cmd
+	 * ops
+	 */
+	if (!sess || !tpg)
+		goto out;
+	rc = target_check_scsi2_reservation_conflict(cmd);
+	if (rc == 1)
+		goto out;
+	else if (rc < 0) {
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = 0;
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reserved_node_acl &&
+	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+		pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
+			tpg->se_tpg_tfo->get_fabric_name());
+		pr_err("Original reserver LUN: %u %s\n",
+			cmd->se_lun->unpacked_lun,
+			dev->dev_reserved_node_acl->initiatorname);
+		pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+			" from %s \n", cmd->se_lun->unpacked_lun,
+			cmd->se_deve->mapped_lun,
+			sess->se_node_acl->initiatorname);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	dev->dev_reserved_node_acl = sess->se_node_acl;
+	dev->dev_flags |= DF_SPC2_RESERVATIONS;
+	if (sess->sess_bin_isid != 0) {
+		dev->dev_res_bin_isid = sess->sess_bin_isid;
+		dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+		" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+
+out_unlock:
+	spin_unlock(&dev->dev_reservation_lock);
+out:
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	struct se_dev_entry *se_deve;
+	struct se_session *se_sess = cmd->se_sess;
+	int other_cdb = 0, ignore_reg;
+	int registered_nexus = 0, ret = 1; /* Conflict by default */
+	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+	int we = 0; /* Write Exclusive */
+	int legacy = 0; /* Act like a legacy device and return
+			 * RESERVATION CONFLICT on some CDBs */
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_seq_non_holder(cmd,
+					cdb, pr_reg_type);
+
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Determine if the registration should be ignored due to
+	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
+	 */
+	ignore_reg = (pr_reg_type & 0x80000000);
+	if (ignore_reg)
+		pr_reg_type &= ~0x80000000;
+
+	switch (pr_reg_type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		we = 1;
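+		/* fall through */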
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		/*
+		 * Some commands are only allowed for the persistent reservation
+		 * holder.
+		 */
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		we = 1;
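+		/* fall through */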
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		/*
+		 * Some commands are only allowed for registered I_T Nexuses.
+		 */
+		reg_only = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		we = 1;
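+		/* fall through */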
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		/*
+		 * Each registered I_T Nexus is a reservation holder.
+		 */
+		all_reg = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/*
+	 * Referenced from spc4r17 table 45 for *NON* PR holder access
+	 */
+	switch (cdb[0]) {
+	case SECURITY_PROTOCOL_IN:
+		if (registered_nexus)
+			return 0;
+		ret = (we) ? 0 : 1;
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_ATTRIBUTE:
+	case READ_BUFFER:
+	case RECEIVE_DIAGNOSTIC:
+		if (legacy) {
+			ret = 1;
+			break;
+		}
+		if (registered_nexus) {
+			ret = 0;
+			break;
+		}
+		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		/*
+		 * This follows PERSISTENT_RESERVE_OUT service actions that
+		 * are allowed in the presence of various reservations.
+		 * See spc4r17, table 46
+		 */
+		switch (cdb[1] & 0x1f) {
+		case PRO_CLEAR:
+		case PRO_PREEMPT:
+		case PRO_PREEMPT_AND_ABORT:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		case PRO_REGISTER:
+		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+			ret = 0;
+			break;
+		case PRO_REGISTER_AND_MOVE:
+		case PRO_RESERVE:
+			ret = 1;
+			break;
+		case PRO_RELEASE:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		default:
+			pr_err("Unknown PERSISTENT_RESERVE_OUT service"
+				" action: 0x%02x\n", cdb[1] & 0x1f);
+			return -EINVAL;
+		}
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/* Handled by CRH=1 in target_scsi2_reservation_release() */
+		ret = 0;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
+		ret = 0;
+		break;
+	case TEST_UNIT_READY:
+		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+		break;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_MANAGEMENT_PROTOCOL_IN:
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+			if (legacy) {
+				ret = 1;
+				break;
+			}
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			pr_err("Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -EINVAL;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case READ_MEDIA_SERIAL_NUMBER:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!ret && !other_cdb) {
+#if 0
+		pr_debug("Allowing explicit CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+		return ret;
+	}
+	/*
+	 * Check for write conflicts from initiator ports *NOT* holding the
+	 * WRITE_EXCLUSIVE_* reservation.
+	 */
+	if ((we) && !(registered_nexus)) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			pr_debug("%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation holding
+			 * nexuses to issue CDBs.
+			 */
+#if 0
+			if (!registered_nexus) {
+				pr_debug("Allowing implicit CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+#endif
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+#if 0
+			pr_debug("Allowing implicit CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+			return 0;
+		}
+	}
+	pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%02x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ? "" : "un",
+		se_sess->se_node_acl->initiatorname, cdb[0],
+		core_scsi3_pr_dump_type(pr_reg_type));
+
+	return 1; /* Conflict by default */
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	u32 prg;
+	/*
+	 * PRGeneration field shall contain the value of a 32-bit wrapping
+	 * counter maintained by the device server.
+	 *
+	 * Note that this is done regardless of Active Persist across
+	 * Target PowerLoss (APTPL)
+	 *
+	 * See spc4r17 section 6.3.12 READ_KEYS service action
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	prg = su_dev->t10_pr.pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+	struct se_cmd *cmd,
+	u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!sess)
+		return 0;
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_pr_res_holder) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	*pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -EINVAL;
+	}
+	if (!dev->dev_pr_res_holder->isid_present_at_reg) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+	       sess->sess_bin_isid) ? 0 : -EINVAL;
+	/*
+	 * Use bit in *pr_reg_type to notify ISID mismatch in
+	 * core_scsi3_pr_seq_non_holder().
+	 */
+	if (ret != 0)
+		*pr_reg_type |= 0x80000000;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
+		return NULL;
+	}
+
+	pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
+					GFP_ATOMIC);
+	if (!pr_reg->pr_aptpl_buf) {
+		pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = nacl;
+	pr_reg->pr_reg_deve = deve;
+	pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+	pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = aptpl;
+	pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+	/*
+	 * If an ISID value for this SCSI Initiator Port exists,
+	 * save it to the registration now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+
+	return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_dev_entry *deve_tmp;
+	struct se_node_acl *nacl_tmp;
+	struct se_port *port, *port_tmp;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+	int ret;
+	/*
+	 * Create a registration for the I_T Nexus upon which the
+	 * PROUT REGISTER was received.
+	 */
+	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!pr_reg)
+		return NULL;
+	/*
+	 * Return pointer to pr_reg for ALL_TG_PT=0
+	 */
+	if (!all_tg_pt)
+		return pr_reg;
+	/*
+	 * Create list of matching SCSI Initiator Port registrations
+	 * for ALL_TG_PT=1
+	 */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+		atomic_inc(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(deve_tmp, &port->sep_alua_list,
+					alua_port_list) {
+			/*
+			 * This pointer will be NULL for demo mode MappedLUNs
+			 * that have not been made explicit via a ConfigFS
+			 * MappedLUN group for the SCSI Initiator Node ACL.
+			 */
+			if (!deve_tmp->se_lun_acl)
+				continue;
+
+			nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+			/*
+			 * Skip the matching struct se_node_acl that is allocated
+			 * above..
+			 */
+			if (nacl == nacl_tmp)
+				continue;
+			/*
+			 * Only perform PR registrations for target ports on
+			 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
+			 * arrived.
+			 */
+			if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+				continue;
+			/*
+			 * Look for a matching Initiator Node ACL in ASCII format
+			 */
+			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+				continue;
+
+			atomic_inc(&deve_tmp->pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock_bh(&port->sep_alua_lock);
+			/*
+			 * Grab a configfs group dependency that is released
+			 * for the exception path at label out: below, or upon
+			 * completion of adding ALL_TG_PT=1 registrations in
+			 * __core_scsi3_add_registration()
+			 */
+			ret = core_scsi3_lunacl_depend_item(deve_tmp);
+			if (ret < 0) {
+				pr_err("core_scsi3_lunacl_depend"
+						"_item() failed\n");
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				goto out;
+			}
+			/*
+			 * Located a matching SCSI Initiator Port on a different
+			 * port, allocate the pr_reg_atp and attach it to the
+			 * pr_reg->pr_reg_atp_list that will be processed once
+			 * the original *pr_reg is processed in
+			 * __core_scsi3_add_registration()
+			 */
+			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+						nacl_tmp, deve_tmp, NULL,
+						sa_res_key, all_tg_pt, aptpl);
+			if (!pr_reg_atp) {
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_lunacl_undepend_item(deve_tmp);
+				goto out;
+			}
+
+			list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+				      &pr_reg->pr_reg_atp_list);
+			spin_lock_bh(&port->sep_alua_lock);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&dev->se_port_lock);
+		atomic_dec(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return pr_reg;
+out:
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+	}
+	kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+	struct t10_reservation *pr_tmpl,
+	u64 sa_res_key,
+	unsigned char *i_port,
+	unsigned char *isid,
+	u32 mapped_lun,
+	unsigned char *t_port,
+	u16 tpgt,
+	u32 target_lun,
+	int res_holder,
+	int all_tg_pt,
+	u8 type)
+{
+	struct t10_pr_registration *pr_reg;
+
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
+		return -EINVAL;
+	}
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
+		return -ENOMEM;
+	}
+	pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = NULL;
+	pr_reg->pr_reg_deve = NULL;
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = target_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = 1;
+	pr_reg->pr_reg_tg_pt_lun = NULL;
+	pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+	pr_reg->pr_res_type = type;
+	/*
+	 * If an ISID value had been saved in APTPL metadata for this
+	 * SCSI Initiator Port, restore it now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+	/*
+	 * Copy the i_port and t_port information from caller.
+	 */
+	snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+	snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+	pr_reg->pr_reg_tpgt = tpgt;
+	/*
+	 * Set pr_res_holder from caller, the pr_reg who is the reservation
+	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+	 * the Initiator Node LUN ACL from the fabric module is created for
+	 * this registration.
+	 */
+	pr_reg->pr_res_holder = res_holder;
+
+	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+	pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
+			" metadata\n", (res_holder) ? "+reservation" : "");
+	return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_node_acl *node_acl,
+	struct t10_pr_registration *pr_reg)
+{
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	dev->dev_pr_res_holder = pr_reg;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tpg->se_tpg_tfo->get_fabric_name(),
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+		tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+				struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 target_lun,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+	u16 tpgt;
+
+	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+	/*
+	 * Copy Initiator Port information from struct se_node_acl
+	 */
+	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+	tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
+	/*
+	 * Look for the matching registrations+reservation from those
+	 * created from APTPL metadata.  Note that multiple registrations
+	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+	 * TransportIDs.
+	 */
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		if (!strcmp(pr_reg->pr_iport, i_port) &&
+		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+		    !(strcmp(pr_reg->pr_tport, t_port)) &&
+		     (pr_reg->pr_reg_tpgt == tpgt) &&
+		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+			pr_reg->pr_reg_nacl = nacl;
+			pr_reg->pr_reg_deve = deve;
+			pr_reg->pr_reg_tg_pt_lun = lun;
+
+			list_del(&pr_reg->pr_reg_aptpl_list);
+			spin_unlock(&pr_tmpl->aptpl_reg_lock);
+			/*
+			 * At this point all of the pointers in *pr_reg will
+			 * be set up, so go ahead and add the registration.
+			 */
+
+			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+			/*
+			 * If this registration is the reservation holder,
+			 * make that happen now..
+			 */
+			if (pr_reg->pr_res_holder)
+				core_scsi3_aptpl_reserve(dev, tpg,
+						nacl, pr_reg);
+			/*
+			 * Reenable pr_aptpl_active to accept new metadata
+			 * updates once the SCSI device is active again..
+			 */
+			spin_lock(&pr_tmpl->aptpl_reg_lock);
+			pr_tmpl->pr_aptpl_active = 1;
+		}
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+	return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+	struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
+
+	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+				lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+	struct target_core_fabric_ops *tfo,
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type)
+{
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+		"_AND_MOVE" : (register_type == 1) ?
+		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+		(prf_isid) ? i_buf : "");
+	pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->tpg_get_tag(se_tpg));
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n",  tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		dev->transport->name);
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
+		pr_reg->pr_res_key, pr_reg->pr_res_generation,
+		pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * This function can be called with struct se_device->dev_reservation_lock
+ * held when register_move = 1.
+ */
+static void __core_scsi3_add_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type,
+	int register_move)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+
+	/*
+	 * Increment PRgeneration counter for struct se_device upon a successful
+	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+	 *
+	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+	 * action, the struct se_device->dev_reservation_lock will already be held,
+	 * so we do not call core_scsi3_pr_generation() which grabs the lock
+	 * for the REGISTER.
+	 */
+	pr_reg->pr_res_generation = (register_move) ?
+			su_dev->t10_pr.pr_generation++ :
+			core_scsi3_pr_generation(dev);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+	pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+	 */
+	if (!pr_reg->pr_reg_all_tg_pt || register_move)
+		return;
+	/*
+	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+	 * allocated in __core_scsi3_alloc_registration()
+	 */
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		list_add_tail(&pr_reg_tmp->pr_reg_list,
+			      &pr_tmpl->registration_list);
+		pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+		__core_scsi3_dump_registration(tfo, dev,
+				pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+				register_type);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Drop configfs group dependency reference from
+		 * __core_scsi3_alloc_registration()
+		 */
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+	}
+}
+
+static int core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl,
+	int register_type,
+	int register_move)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!pr_reg)
+		return -EPERM;
+
+	__core_scsi3_add_registration(dev, nacl, pr_reg,
+			register_type, register_move);
+	return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	unsigned char *isid)
+{
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct se_portal_group *tpg;
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+		/*
+		 * First look for a matching struct se_node_acl
+		 */
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * If this registration does NOT contain a fabric provided
+		 * ISID, then we have found a match.
+		 */
+		if (!pr_reg->isid_present_at_reg) {
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportID w/ ISIDs is enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+				if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
+					continue;
+			}
+			atomic_inc(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&pr_tmpl->registration_lock);
+			return pr_reg;
+		}
+		/*
+		 * If the *pr_reg contains a fabric defined ISID for multi-value
+		 * SCSI Initiator Port TransportIDs, then we expect a valid
+		 * matching ISID to be provided by the local SCSI Initiator Port.
+		 */
+		if (!isid)
+			continue;
+		if (strcmp(isid, pr_reg->pr_reg_isid))
+			continue;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		return pr_reg;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_session *sess)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+	unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+		memset(&buf[0], 0, PR_REG_ISID_LEN);
+		tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
+					PR_REG_ISID_LEN);
+		isid_ptr = &buf[0];
+	}
+
+	return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+	atomic_dec(&pr_reg->pr_res_holders);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_check_implict_release(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct t10_pr_registration *pr_res_holder;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return ret;
+	}
+	if (pr_res_holder == pr_reg) {
+		/*
+		 * Perform an implicit RELEASE if the registration that
+		 * is being released is holding the reservation.
+		 *
+		 * From spc4r17, section 5.7.11.1:
+		 *
+		 * e) If the I_T nexus is the persistent reservation holder
+		 *    and the persistent reservation is not an all registrants
+		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
+		 *    service action or REGISTER AND  IGNORE EXISTING KEY
+		 *    service action with the SERVICE ACTION RESERVATION KEY
+		 *    field set to zero (see 5.7.11.3).
+		 */
+		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+		ret = 1;
+		/*
+		 * For 'All Registrants' reservation types, all existing
+		 * registrations are still processed as reservation holders
+		 * in core_scsi3_pr_seq_non_holder() after the initial
+		 * reservation holder is implicitly released here.
+		 */
+	} else if (pr_reg->pr_reg_all_tg_pt &&
+		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+			  pr_reg->pr_reg_nacl->initiatorname)) &&
+		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+		pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
+			" UNREGISTER while existing reservation with matching"
+			" key 0x%016Lx is present from another SCSI Initiator"
+			" Port\n", pr_reg->pr_res_key);
+		ret = -EPERM;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct t10_reservation->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int dec_holders)
+{
+	struct target_core_fabric_ops *tfo =
+			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_reg->pr_reg_deve->def_pr_registered = 0;
+	pr_reg->pr_reg_deve->pr_res_key = 0;
+	list_del(&pr_reg->pr_reg_list);
+	/*
+	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+	 */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released.  Because list_del() is called above,
+	 * the last core_scsi3_put_pr_reg(pr_reg) will release this reference
+	 * count back to zero, and we release *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		spin_unlock(&pr_tmpl->registration_lock);
+		pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->get_fabric_name());
+		cpu_relax();
+		spin_lock(&pr_tmpl->registration_lock);
+	}
+
+	pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(),
+		pr_reg->pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		dev->transport->name);
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!preempt_and_abort_list) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed..
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl))
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+	struct se_device *dev)
+{
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder != NULL) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+				pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		list_del(&pr_reg->pr_reg_aptpl_list);
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+	return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
+			&tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
+			&tpg->tpg_group.cg_item);
+
+	atomic_dec(&tpg->tpg_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl)
+		return 0;
+
+	return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
+			&nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl) {
+		atomic_dec(&nacl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+
+	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
+			&nacl->acl_group.cg_item);
+
+	atomic_dec(&nacl->acl_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!lun_acl)
+		return 0;
+
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!lun_acl) {
+		atomic_dec(&se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+
+	atomic_dec(&se_deve->pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_decode_spec_i_port(
+	struct se_cmd *cmd,
+	struct se_portal_group *tpg,
+	unsigned char *l_isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_port *tmp_port;
+	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_node_acl *dest_node_acl = NULL;
+	struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+	struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	LIST_HEAD(tid_dest_list);
+	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+	struct target_core_fabric_ops *tmp_tf_ops;
+	unsigned char *buf;
+	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tpdl, tid_len = 0;
+	int ret, dest_local_nexus, prf_isid;
+	u32 dest_rtpi = 0;
+
+	memset(dest_iport, 0, 64);
+
+	local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Allocate a struct pr_transport_id_holder and setup the
+	 * local_node_acl and local_se_deve pointers and add to
+	 * struct list_head tid_dest_list for add registration
+	 * processing in the loop of tid_dest_list below.
+	 */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!tidh_new) {
+		pr_err("Unable to allocate tidh_new\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+	tidh_new->dest_se_deve = local_se_deve;
+
+	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+				se_sess->se_node_acl, local_se_deve, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!local_pr_reg) {
+		kfree(tidh_new);
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -ENOMEM;
+	}
+	tidh_new->dest_pr_reg = local_pr_reg;
+	/*
+	 * The local I_T nexus does not hold any configfs dependencies,
+	 * so we set tid_h->dest_local_nexus=1 to prevent the
+	 * configfs_undepend_item() calls in the tid_dest_list loops below.
+	 */
+	tidh_new->dest_local_nexus = 1;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+	buf = transport_kmap_data_sg(cmd);
+	/*
+	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+	 * first extract TransportID Parameter Data Length, and make sure
+	 * the value matches up to the SCSI expected data transfer length.
+	 */
+	tpdl = (buf[24] & 0xff) << 24;
+	tpdl |= (buf[25] & 0xff) << 16;
+	tpdl |= (buf[26] & 0xff) << 8;
+	tpdl |= buf[27] & 0xff;
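+	/* tpdl is the big-endian TransportID parameter data length (bytes 24-27) */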
+
+	if ((tpdl + 28) != cmd->data_length) {
+		pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
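+	/*
+	 * Walk each TransportID in the parameter list.  tid_len is filled in
+	 * by the fabric's parse routine and used below to advance ptr/tpdl to
+	 * the next entry.
+	 */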
+	while (tpdl > 0) {
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+			tmp_tpg = tmp_port->sep_tpg;
+			if (!tmp_tpg)
+				continue;
+			tmp_tf_ops = tmp_tpg->se_tpg_tfo;
+			if (!tmp_tf_ops)
+				continue;
+			if (!tmp_tf_ops->get_fabric_proto_ident ||
+			    !tmp_tf_ops->tpg_parse_pr_out_transport_id)
+				continue;
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+			if (tmp_proto_ident != proto_ident)
+				continue;
+			dest_rtpi = tmp_port->sep_rtpi;
+
+			i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+					tmp_tpg, (const char *)ptr, &tid_len,
+					&iport_ptr);
+			if (!i_str)
+				continue;
+
+			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&dev->se_port_lock);
+
+			ret = core_scsi3_tpg_depend_item(tmp_tpg);
+			if (ret != 0) {
+				pr_err(" core_scsi3_tpg_depend_item()"
+					" for tmp_tpg\n");
+				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				ret = -EINVAL;
+				goto out;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
+			 */
+			spin_lock_irq(&tmp_tpg->acl_node_lock);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl) {
+				atomic_inc(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_inc();
+			}
+			spin_unlock_irq(&tmp_tpg->acl_node_lock);
+
+			if (!dest_node_acl) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+			if (ret != 0) {
+				pr_err("configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				ret = -EINVAL;
+				goto out;
+			}
+
+			dest_tpg = tmp_tpg;
+			pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
+				" %s Port RTPI: %hu\n",
+				dest_tpg->se_tpg_tfo->get_fabric_name(),
+				dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!dest_tpg) {
+			pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto out;
+		}
+#if 0
+		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+#endif
+		if (tid_len > tpdl) {
+			pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto out;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!dest_se_deve) {
+			pr_err("Unable to locate %s dest_se_deve"
+				" from destination RTPI: %hu\n",
+				dest_tpg->se_tpg_tfo->get_fabric_name(),
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+		if (ret < 0) {
+			pr_err("core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			atomic_dec(&dest_se_deve->pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			ret = -EINVAL;
+			goto out;
+		}
+#if 0
+		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %u\n",
+			dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and setup
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+		 */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!tidh_new) {
+			pr_err("Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			ret = -ENOMEM;
+			goto out;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This
+		 * done because of the following from spc4r17 in section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if th
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, all_tg_pt, aptpl);
+		if (!dest_pr_reg) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto out;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+
+	transport_kunmap_data_sg(cmd);
+
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportIDs using the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device;
+	 * ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port on which the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
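+		/*
+		 * Format the optional ISID suffix for the log message, then
+		 * perform the registration deferred from the first pass.
+		 */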
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+						PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
+			dest_node_acl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", dest_se_deve->mapped_lun);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out:
+	transport_kunmap_data_sg(cmd);
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+		/*
+		 * Release any extra ALL_TG_PT=1 registrations for
+		 * the SPEC_I_PT=1 case.
+		 */
+		list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+				&dest_pr_reg->pr_reg_atp_list,
+				pr_reg_atp_mem_list) {
+			list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+			core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+		}
+
+		kfree(dest_pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
+static int __core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	unsigned char tmp[512], isid_buf[32];
+	ssize_t len = 0;
+	int reg_count = 0;
+
+	memset(buf, 0, pr_aptpl_buf_len);
+	/*
+	 * Called to clear metadata once APTPL has been deactivated.
+	 */
+	if (clear_aptpl_metadata) {
+		snprintf(buf, pr_aptpl_buf_len,
+				"No Registrations or Reservations\n");
+		return 0;
+	}
+	/*
+	 * Walk the registration list..
+	 */
+	spin_lock(&su_dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+			pr_reg_list) {
+
+		tmp[0] = '\0';
+		isid_buf[0] = '\0';
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		lun = pr_reg->pr_reg_tg_pt_lun;
+		/*
+		 * Write out any ISID value to APTPL metadata that was included
+		 * in the original registration.
+		 */
+		if (pr_reg->isid_present_at_reg)
+			snprintf(isid_buf, 32, "initiator_sid=%s\n",
+					pr_reg->pr_reg_isid);
+		/*
+		 * Include special metadata if the pr_reg matches the
+		 * reservation holder.
+		 */
+		if (dev->dev_pr_res_holder == pr_reg) {
+			snprintf(tmp, 512, "PR_REG_START: %d"
+				"\ninitiator_fabric=%s\n"
+				"initiator_node=%s\n%s"
+				"sa_res_key=%llu\n"
+				"res_holder=1\nres_type=%02x\n"
+				"res_scope=%02x\nres_all_tg_pt=%d\n"
+				"mapped_lun=%u\n", reg_count,
+				tpg->se_tpg_tfo->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_res_type,
+				pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		} else {
+			snprintf(tmp, 512, "PR_REG_START: %d\n"
+				"initiator_fabric=%s\ninitiator_node=%s\n%s"
+				"sa_res_key=%llu\nres_holder=0\n"
+				"res_all_tg_pt=%d\nmapped_lun=%u\n",
+				reg_count, tpg->se_tpg_tfo->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		}
+
+		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+			pr_err("Unable to update renaming"
+				" APTPL metadata\n");
+			spin_unlock(&su_dev->t10_pr.registration_lock);
+			return -EMSGSIZE;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+
+		/*
+		 * Include information about the associated SCSI target port.
+		 */
+		snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+			"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+			" %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg),
+			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+			pr_err("Unable to update renaming"
+				" APTPL metadata\n");
+			spin_unlock(&su_dev->t10_pr.registration_lock);
+			return -EMSGSIZE;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+		reg_count++;
+	}
+	spin_unlock(&su_dev->t10_pr.registration_lock);
+
+	if (!reg_count)
+		len += sprintf(buf+len, "No Registrations or Reservations");
+
+	return 0;
+}
+
+static int core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	int ret;
+
+	spin_lock(&dev->dev_reservation_lock);
+	ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->aptpl_file_mutex held
+ */
+static int __core_scsi3_write_aptpl_to_file(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len)
+{
+	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+	struct file *file;
+	struct iovec iov[1];
+	mm_segment_t old_fs;
+	int flags = O_RDWR | O_CREAT | O_TRUNC;
+	char path[512];
+	int ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+	memset(path, 0, 512);
+
+	if (strlen(&wwn->unit_serial[0]) >= 512) {
+		pr_err("WWN value for struct se_device does not fit"
+			" into path buffer\n");
+		return -EMSGSIZE;
+	}
+
+	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		pr_err("filp_open(%s) for APTPL metadata"
+			" failed\n", path);
+		return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+	}
+
+	iov[0].iov_base = &buf[0];
+	if (!pr_aptpl_buf_len)
+		iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+	else
+		iov[0].iov_len = pr_aptpl_buf_len;
+
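+	/*
+	 * Temporarily lift the user/kernel addressing limit so vfs_writev()
+	 * accepts the kernel buffer in iov[0] in place of a user pointer.
+	 */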
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		pr_debug("Error writing APTPL metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -EIO;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+static int core_scsi3_update_and_write_aptpl(
+	struct se_device *dev,
+	unsigned char *in_buf,
+	u32 in_pr_aptpl_buf_len)
+{
+	unsigned char null_buf[64], *buf;
+	u32 pr_aptpl_buf_len;
+	int ret, clear_aptpl_metadata = 0;
+	/*
+	 * Can be called with a NULL pointer from PROUT service action CLEAR
+	 */
+	if (!in_buf) {
+		memset(null_buf, 0, 64);
+		buf = &null_buf[0];
+		/*
+		 * This will clear the APTPL metadata to:
+		 * "No Registrations or Reservations" status
+		 */
+		pr_aptpl_buf_len = 64;
+		clear_aptpl_metadata = 1;
+	} else {
+		buf = in_buf;
+		pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+	}
+
+	ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	if (ret != 0)
+		return ret;
+	/*
+	 * __core_scsi3_write_aptpl_to_file() will call strlen()
+	 * on the passed buf to determine pr_aptpl_buf_len.
+	 */
+	ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+	if (ret != 0)
+		return ret;
+
+	return ret;
+}
+
+static int core_scsi3_emulate_pro_register(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int all_tg_pt,
+	int spec_i_pt,
+	int ignore_key)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_device *dev = cmd->se_dev;
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	/* Used for APTPL metadata w/ UNREGISTER */
+	unsigned char *pr_aptpl_buf = NULL;
+	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+	int pr_holder = 0, ret = 0, type;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
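+	/*
+	 * If the fabric module can report an initiator session identifier
+	 * (ISID), capture it here so the registration records it.
+	 */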
+	if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
+		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+		se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
+				PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
+	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!pr_reg_e) {
+		if (res_key) {
+			pr_warn("SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!sa_res_key)
+			return 0;
+
+		if (!spec_i_pt) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			ret = core_scsi3_alloc_registration(cmd->se_dev,
+					se_sess->se_node_acl, se_deve, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					ignore_key, 0);
+			if (ret != 0) {
+				pr_err("Unable to allocate"
+					" struct t10_pr_registration\n");
+				cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+				return -EINVAL;
+			}
+		} else {
+			/*
+			 * Register both the Initiator port that received
+			 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
+			 * TransportID from Parameter list and loop through
+			 * fabric dependent parameter list while calling
+			 * logic from core_scsi3_alloc_registration() for
+			 * each TransportID provided SCSI Initiator Port/Device
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		/*
+		 * Nothing left to do for the APTPL=0 case.
+		 */
+		if (!aptpl) {
+			pr_tmpl->pr_aptpl_active = 0;
+			core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+			pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
+					" REGISTER\n");
+			return 0;
+		}
+		/*
+		 * Locate the newly allocated local I_T Nexus *pr_reg, and
+		 * update the APTPL metadata information using its
+		 * preallocated *pr_reg->pr_aptpl_buf.
+		 */
+		pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
+				se_sess->se_node_acl, se_sess);
+
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!ret) {
+			pr_tmpl->pr_aptpl_active = 1;
+			pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg);
+		return ret;
+	} else {
+		/*
+		 * Locate the existing *pr_reg via struct se_node_acl pointers
+		 */
+		pr_reg = pr_reg_e;
+		type = pr_reg->pr_res_type;
+
+		if (!ignore_key) {
+			if (res_key != pr_reg->pr_res_key) {
+				pr_err("SPC-3 PR REGISTER: Received"
+					" res_key: 0x%016Lx does not match"
+					" existing SA REGISTER res_key:"
+					" 0x%016Lx\n", res_key,
+					pr_reg->pr_res_key);
+				core_scsi3_put_pr_reg(pr_reg);
+				cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+				return -EINVAL;
+			}
+		}
+		if (spec_i_pt) {
+			pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
+				" set while sa_res_key=0\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			return -EINVAL;
+		}
+		/*
+		 * An existing ALL_TG_PT=1 registration being released
+		 * must also set ALL_TG_PT=1 in the incoming PROUT.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+			pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+				" registration exists, but ALL_TG_PT=1 bit not"
+				" present in received PROUT\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			return -EINVAL;
+		}
+		/*
+		 * Allocate APTPL metadata buffer used for UNREGISTER ops
+		 */
+		if (aptpl) {
+			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+						GFP_KERNEL);
+			if (!pr_aptpl_buf) {
+				pr_err("Unable to allocate"
+					" pr_aptpl_buf\n");
+				core_scsi3_put_pr_reg(pr_reg);
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				return -EINVAL;
+			}
+		}
+		/*
+		 * sa_res_key=0: Unregister Reservation Key for the registered
+		 * I_T Nexus.  A non-zero sa_res_key: Change Reservation Key
+		 * for the registered I_T Nexus.
+		 */
+		if (!sa_res_key) {
+			pr_holder = core_scsi3_check_implict_release(
+					cmd->se_dev, pr_reg);
+			if (pr_holder < 0) {
+				kfree(pr_aptpl_buf);
+				core_scsi3_put_pr_reg(pr_reg);
+				cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+				return -EINVAL;
+			}
+
+			spin_lock(&pr_tmpl->registration_lock);
+			/*
+			 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+			 * and matching pr_res_key.
+			 */
+			if (pr_reg->pr_reg_all_tg_pt) {
+				list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					if (!pr_reg_p->pr_reg_all_tg_pt)
+						continue;
+
+					if (pr_reg_p->pr_res_key != res_key)
+						continue;
+
+					if (pr_reg == pr_reg_p)
+						continue;
+
+					if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+						   pr_reg_p->pr_reg_nacl->initiatorname))
+						continue;
+
+					__core_scsi3_free_registration(dev,
+							pr_reg_p, NULL, 0);
+				}
+			}
+			/*
+			 * Release the calling I_T Nexus registration now..
+			 */
+			__core_scsi3_free_registration(cmd->se_dev, pr_reg,
+							NULL, 1);
+			/*
+			 * From spc4r17, section 5.7.11.3 Unregistering
+			 *
+			 * If the persistent reservation is a registrants only
+			 * type, the device server shall establish a unit
+			 * attention condition for the initiator port associated
+			 * with every registered I_T nexus except for the I_T
+			 * nexus on which the PERSISTENT RESERVE OUT command was
+			 * received, with the additional sense code set to
+			 * RESERVATIONS RELEASED.
+			 */
+			if (pr_holder &&
+			   ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+			    (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+				list_for_each_entry(pr_reg_p,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					core_scsi3_ua_allocate(
+						pr_reg_p->pr_reg_nacl,
+						pr_reg_p->pr_res_mapped_lun,
+						0x2A,
+						ASCQ_2AH_RESERVATIONS_RELEASED);
+				}
+			}
+			spin_unlock(&pr_tmpl->registration_lock);
+
+			if (!aptpl) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for UNREGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!ret) {
+				pr_tmpl->pr_aptpl_active = 1;
+				pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+						" for UNREGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			return ret;
+		} else {
+			/*
+			 * Increment PRgeneration counter for struct se_device
+			 * upon a successful REGISTER, see spc4r17 section 6.3.2
+			 * READ_KEYS service action.
+			 */
+			pr_reg->pr_res_generation = core_scsi3_pr_generation(
+							cmd->se_dev);
+			pr_reg->pr_res_key = sa_res_key;
+			pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+				" Key for %s to: 0x%016Lx PRgeneration:"
+				" 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+				pr_reg->pr_reg_nacl->initiatorname,
+				pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+			if (!aptpl) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				core_scsi3_put_pr_reg(pr_reg);
+				pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for REGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!ret) {
+				pr_tmpl->pr_aptpl_active = 1;
+				pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+						" for REGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			core_scsi3_put_pr_reg(pr_reg);
+		}
+	}
+	return 0;
+}
+
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		return "Write Exclusive Access";
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		return "Exclusive Access";
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		return "Write Exclusive Access, Registrants Only";
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		return "Exclusive Access, Registrants Only";
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		return "Write Exclusive Access, All Registrants";
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return "Exclusive Access, All Registrants";
+	default:
+		break;
+	}
+
+	return "Unknown SPC-3 PR Type";
+}
+
+static int core_scsi3_pro_reserve(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct t10_pr_registration *pr_reg, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int ret, prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RESERVE\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * An application client creates a persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
+	 * a registered I_T nexus with the following parameters:
+	 *    a) RESERVATION KEY set to the value of the reservation key that is
+	 * 	 registered with the logical unit for the I_T nexus; and
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form struct t10_pr_registration
+	 * *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder)) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (pr_res_holder != pr_reg) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			pr_err("SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				cmd->se_tfo->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return 0;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
+	 */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			cmd->se_tfo->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			(prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata"
+					" for RESERVE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+		break;
+	default:
+		pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explict)
+{
+	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 */
+	dev->dev_pr_res_holder = NULL;
+
+	pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (explict) ? "explict" : "implict",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
+		tfo->get_fabric_name(), se_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	/*
+	 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+	 */
+	pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
+static int core_scsi3_emulate_pro_release(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	int ret, all_reg = 0;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RELEASE\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * If there is no persistent reservation or in response to a persistent
+	 * reservation release request from a registered I_T nexus that is not a
+	 * persistent reservation holder (see 5.7.10), the device server shall
+	 * do the following:
+	 *
+	 *     a) Not release the persistent reservation, if any;
+	 *     b) Not remove any registrations; and
+	 *     c) Complete the command with GOOD status.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		/*
+		 * No persistent reservation, return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return 0;
+	}
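+	/*
+	 * With an All Registrants reservation every registered I_T nexus is
+	 * a reservation holder, so the non-holder early return below does
+	 * not apply.
+	 */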
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+		all_reg = 1;
+
+	if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+		/*
+		 * Non 'All Registrants' PR Type cases..
+		 * Release request from a registered I_T nexus that is not a
+		 * persistent reservation holder; return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return 0;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * Only the persistent reservation holder (see 5.7.10) is allowed to
+	 * release a persistent reservation.
+	 *
+	 * An application client releases the persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RELEASE service action through
+	 * an I_T nexus that is a persistent reservation holder with the
+	 * following parameters:
+	 *
+	 *     a) RESERVATION KEY field set to the value of the reservation key
+	 *	  that is registered with the logical unit for the I_T nexus;
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing and above:
+	 *
+	 * b) TYPE field and SCOPE field set to match the persistent
+	 *    reservation being released.
+	 */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		pr_err("SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE "
+			"and/or SCOPE  while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			cmd->se_tfo->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only type
+	 * or all registrants type persistent reservation,
+	 *    the device server shall establish a unit attention condition for
+	 *    the initiator port associated with every registered I_T nexus
+	 *    other than the I_T nexus on which the PERSISTENT
+	 *    RESERVE OUT command with RELEASE service action was received,
+	 *    with the additional sense code set to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+			pr_reg, 1);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * No UNIT ATTENTION conditions will be established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS, so
+		 * go ahead and check for APTPL=1 update+write below.
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+				pr_reg_p->pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_clear(
+	struct se_cmd *cmd,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = cmd->se_sess;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u32 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
+			se_sess->se_node_acl, se_sess);
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
+	 * registered I_T nexus with the following parameter:
+	 *
+	 *	a) RESERVATION KEY field set to the value of the reservation key
+	 * 	   that is registered with the logical unit for the I_T nexus.
+	 */
+	if (res_key != pr_reg_n->pr_res_key) {
+		pr_err("SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	/*
+	 * a) Release the persistent reservation, if any;
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * b) Remove all registration(s) (see spc4r17 5.7.7);
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg, NULL,
+					calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every registered I_T nexus other
+		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
+		 *    command with CLEAR service action was received, with the
+		 *    additional sense code set to RESERVATIONS PREEMPTED.
+		 */
+		if (!calling_it_nexus)
+			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
+		cmd->se_tfo->get_fabric_name());
+
+	if (pr_tmpl->pr_aptpl_active) {
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+		pr_debug("SPC-3 PR: Updated APTPL metadata"
+				" for CLEAR\n");
+	}
+
+	core_scsi3_pr_generation(dev);
+	return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int type,
+	int scope,
+	int abort)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Do an implicit RELEASE of the existing reservation.
+	 */
+	if (dev->dev_pr_res_holder)
+		__core_scsi3_complete_pro_release(dev, nacl,
+				dev->dev_pr_res_holder, 0);
+
+	dev->dev_pr_res_holder = pr_reg;
+	pr_reg->pr_res_holder = 1;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+
+	pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	/*
+	 * For PREEMPT_AND_ABORT, add the preempting reservation's
+	 * struct t10_pr_registration to the list that will be compared
+	 * against received CDBs..
+	 */
+	if (preempt_and_abort_list)
+		list_add_tail(&pr_reg->pr_reg_abort_list,
+				preempt_and_abort_list);
+}
+
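+/*
+ * Free the registrations collected on the local preempt_and_abort list once
+ * PREEMPT_AND_ABORT processing has completed, skipping the preempting
+ * registration that is now the reservation holder.
+ */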
+static void core_scsi3_release_preempt_and_abort(
+	struct list_head *preempt_and_abort_list,
+	struct t10_pr_registration *pr_reg_holder)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+
+		list_del(&pr_reg->pr_reg_abort_list);
+		if (pr_reg_holder == pr_reg)
+			continue;
+		if (pr_reg->pr_res_holder) {
+			pr_warn("pr_reg->pr_res_holder still set\n");
+			continue;
+		}
+
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+}
+
+static int core_scsi3_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = cmd->se_sess;
+	LIST_HEAD(preempt_and_abort_list);
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	u32 pr_res_mapped_lun = 0;
+	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+	int prh_type = 0, prh_scope = 0, ret;
+
+	if (!se_sess) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+
+	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
+			(abort) ? "_AND_ABORT" : "");
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	if (pr_reg_n->pr_res_key != res_key) {
+		core_scsi3_put_pr_reg(pr_reg_n);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+
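+	/*
+	 * Note whether the existing reservation, if any, is an All Registrants
+	 * type; that determines which registrations may be preempted below.
+	 */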
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder &&
+	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+		all_reg = 1;
+
+	if (!all_reg && !sa_res_key) {
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+	/*
+	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+	 *
+	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
+	 * persistent reservation holder or there is no persistent reservation
+	 * holder (i.e., there is no persistent reservation), then the device
+	 * server shall perform a preempt by doing the following in an
+	 * uninterrupted series of actions. (See below..)
+	 */
+	if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
+		/*
+		 * No existing or SA Reservation Key matching reservations..
+		 *
+		 * PROUT SA PREEMPT with All Registrant type reservations are
+		 * allowed to be processed without a matching SA Reservation Key
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+			/*
+			 * Removing of registrations in non all registrants
+			 * type reservations without a matching SA reservation
+			 * key.
+			 *
+			 * a) Remove the registrations for all I_T nexuses
+			 *    specified by the SERVICE ACTION RESERVATION KEY
+			 *    field;
+			 * b) Ignore the contents of the SCOPE and TYPE fields;
+			 * c) Process tasks as defined in 5.7.1; and
+			 * d) Establish a unit attention condition for the
+			 *    initiator port associated with every I_T nexus
+			 *    that lost its registration other than the I_T
+			 *    nexus on which the PERSISTENT RESERVE OUT command
+			 *    was received, with the additional sense code set
+			 *    to REGISTRATIONS PREEMPTED.
+			 */
+			if (!all_reg) {
+				if (pr_reg->pr_res_key != sa_res_key)
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, calling_it_nexus);
+				released_regs++;
+			} else {
+				/*
+				 * Case for any existing all registrants type
+				 * reservation, follow logic in spc4r17 section
+				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
+				 *
+				 * For a ZERO SA Reservation key, release
+				 * all other registrations and do an implicit
+				 * release of the active persistent reservation.
+				 *
+				 * For a non-ZERO SA Reservation key, only
+				 * release the matching reservation key from
+				 * registrations.
+				 */
+				if ((sa_res_key) &&
+				     (pr_reg->pr_res_key != sa_res_key))
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				if (calling_it_nexus)
+					continue;
+
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, 0);
+				released_regs++;
+			}
+			if (!calling_it_nexus)
+				core_scsi3_ua_allocate(pr_reg_nacl,
+					pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+		 * RESERVATION KEY field to a value that does not match any
+		 * registered reservation key, then the device server shall
+		 * complete the command with RESERVATION CONFLICT status.
+		 */
+		if (!released_regs) {
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg_n);
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EINVAL;
+		}
+		/*
+		 * For an existing all registrants type reservation
+		 * with a zero SA reservation key, preempt the existing
+		 * reservation with the new PR type and scope.
+		 */
+		if (pr_res_holder && all_reg && !(sa_res_key)) {
+			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+				(abort) ? &preempt_and_abort_list : NULL,
+				type, scope, abort);
+
+			if (abort)
+				core_scsi3_release_preempt_and_abort(
+					&preempt_and_abort_list, pr_reg_n);
+		}
+		spin_unlock(&dev->dev_reservation_lock);
+
+		if (pr_tmpl->pr_aptpl_active) {
+			ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+					&pr_reg_n->pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!ret)
+				pr_debug("SPC-3 PR: Updated APTPL"
+					" metadata for  PREEMPT%s\n", (abort) ?
+					"_AND_ABORT" : "");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg_n);
+		core_scsi3_pr_generation(cmd->se_dev);
+		return 0;
+	}
+	/*
+	 * The PREEMPTing SA reservation key matches that of the
+	 * existing persistent reservation, first, we check if
+	 * we are preempting our own reservation.
+	 * From spc4r17, section 5.7.11.4.3 Preempting
+	 * persistent reservations and registration handling
+	 *
+	 * If an all registrants persistent reservation is not
+	 * present, it is not an error for the persistent
+	 * reservation holder to preempt itself (i.e., a
+	 * PERSISTENT RESERVE OUT with a PREEMPT service action
+	 * or a PREEMPT AND ABORT service action with the
+	 * SERVICE ACTION RESERVATION KEY value equal to the
+	 * persistent reservation holder's reservation key that
+	 * is received from the persistent reservation holder).
+	 * In that case, the device server shall establish the
+	 * new persistent reservation and maintain the
+	 * registration.
+	 */
+	prh_type = pr_res_holder->pr_res_type;
+	prh_scope = pr_res_holder->pr_res_scope;
+	/*
+	 * If the SERVICE ACTION RESERVATION KEY field identifies a
+	 * persistent reservation holder (see 5.7.10), the device
+	 * server shall perform a preempt by doing the following as
+	 * an uninterrupted series of actions:
+	 *
+	 * a) Release the persistent reservation for the holder
+	 *    identified by the SERVICE ACTION RESERVATION KEY field;
+	 */
+	if (pr_reg_n != pr_res_holder)
+		__core_scsi3_complete_pro_release(dev,
+				pr_res_holder->pr_reg_nacl,
+				dev->dev_pr_res_holder, 0);
+	/*
+	 * b) Remove the registrations for all I_T nexuses identified
+	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+	 *    I_T nexus that is being used for the PERSISTENT RESERVE
+	 *    OUT command. If an all registrants persistent reservation
+	 *    is present and the SERVICE ACTION RESERVATION KEY field
+	 *    is set to zero, then all registrations shall be removed
+	 *    except for that of the I_T nexus that is being used for
+	 *    the PERSISTENT RESERVE OUT command;
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		if (calling_it_nexus)
+			continue;
+
+		if (pr_reg->pr_res_key != sa_res_key)
+			continue;
+
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg,
+				(abort) ? &preempt_and_abort_list : NULL,
+				calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every I_T nexus that lost its
+		 *    persistent reservation and/or registration, with the
+		 *    additional sense code set to REGISTRATIONS PREEMPTED;
+		 */
+		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+				ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * c) Establish a persistent reservation for the preempting
+	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
+	 */
+	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+			(abort) ? &preempt_and_abort_list : NULL,
+			type, scope, abort);
+	/*
+	 * d) Process tasks as defined in 5.7.1;
+	 * e) See above..
+	 * f) If the type or scope has changed, then for every I_T nexus
+	 *    whose reservation key was not removed, except for the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command was
+	 *    received, the device server shall establish a unit
+	 *    attention condition for the initiator port associated with
+	 *    that I_T nexus, with the additional sense code set to
+	 *    RESERVATIONS RELEASED. If the type or scope have not
+	 *    changed, then no unit attention condition(s) shall be
+	 *    established for this reason.
+	 */
+	if ((prh_type != type) || (prh_scope != scope)) {
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+
+			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+			if (calling_it_nexus)
+				continue;
+
+			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+					pr_reg->pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Call LUN_RESET logic upon the list of struct t10_pr_registration;
+	 * all received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+	 *
+	 * From there, core_scsi3_release_preempt_and_abort() will
+	 * release every registration in the list (which have already
+	 * been removed from the primary pr_reg list), except the
+	 * new persistent reservation holder, the calling Initiator Port.
+	 */
+	if (abort) {
+		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+						pr_reg_n);
+	}
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+				&pr_reg_n->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+				"%s\n", (abort) ? "_AND_ABORT" : "");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg_n);
+	core_scsi3_pr_generation(cmd->se_dev);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_preempt(cmd, type, scope,
+				res_key, sa_res_key, abort);
+		break;
+	default:
+		pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
+			" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+
+static int core_scsi3_emulate_pro_register_and_move(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int unreg)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_device *dev = cmd->se_dev;
+	struct se_dev_entry *dest_se_deve = NULL;
+	struct se_lun *se_lun = cmd->se_lun;
+	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+	struct se_port *se_port;
+	struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	unsigned char *buf;
+	unsigned char *initiator_str;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tid_len, tmp_tid_len;
+	int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+	unsigned short rtpi;
+	unsigned char proto_ident;
+
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	memset(dest_iport, 0, 64);
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	se_tpg = se_sess->se_tpg;
+	tf_ops = se_tpg->se_tpg_tfo;
+	/*
+	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+	 *	Register behaviors for a REGISTER AND MOVE service action
+	 *
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+				se_sess);
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
+			" *pr_reg for REGISTER_AND_MOVE\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		return -EINVAL;
+	}
+	/*
+	 * The provided reservation key must match the existing reservation key
+	 * provided during this initiator's I_T nexus registration.
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
+			" res_key: 0x%016Lx does not match existing SA REGISTER"
+			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+	/*
+	 * The SERVICE ACTION RESERVATION KEY needs to be non-zero
+	 */
+	if (!sa_res_key) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
+			" sa_res_key\n");
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+
+	/*
+	 * Determine the Relative Target Port Identifier where the reservation
+	 * will be moved to for the TransportID containing SCSI initiator WWN
+	 * information.
+	 */
+	buf = transport_kmap_data_sg(cmd);
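+	/*
+	 * Descriptive note: in the REGISTER AND MOVE parameter data mapped
+	 * below, the RELATIVE TARGET PORT IDENTIFIER is carried in bytes 18-19
+	 * and the TRANSPORTID PARAMETER DATA LENGTH in bytes 20-23, both
+	 * big-endian.
+	 */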
+	rtpi = (buf[18] & 0xff) << 8;
+	rtpi |= buf[19] & 0xff;
+	tid_len = (buf[20] & 0xff) << 24;
+	tid_len |= (buf[21] & 0xff) << 16;
+	tid_len |= (buf[22] & 0xff) << 8;
+	tid_len |= buf[23] & 0xff;
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	if ((tid_len + 24) != cmd->data_length) {
+		pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+			" does not equal CDB data_length: %u\n", tid_len,
+			cmd->data_length);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+		if (se_port->sep_rtpi != rtpi)
+			continue;
+		dest_se_tpg = se_port->sep_tpg;
+		if (!dest_se_tpg)
+			continue;
+		dest_tf_ops = dest_se_tpg->se_tpg_tfo;
+		if (!dest_tf_ops)
+			continue;
+
+		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+		if (ret != 0) {
+			pr_err("core_scsi3_tpg_depend_item() failed"
+				" for dest_se_tpg\n");
+			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_put_pr_reg(pr_reg);
+			cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -EINVAL;
+		}
+
+		spin_lock(&dev->se_port_lock);
+		break;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	if (!dest_se_tpg || !dest_tf_ops) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" fabric ops from Relative Target Port Identifier:"
+			" %hu\n", rtpi);
+		core_scsi3_put_pr_reg(pr_reg);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	proto_ident = (buf[24] & 0x0f);
+#if 0
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+			" 0x%02x\n", proto_ident);
+#endif
+	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
+			" proto_ident: 0x%02x does not match ident: 0x%02x"
+			" from fabric: %s\n", proto_ident,
+			dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+			dest_tf_ops->get_fabric_name());
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+			" containg a valid tpg_parse_pr_out_transport_id"
+			" function pointer\n");
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = -EINVAL;
+		goto out;
+	}
+	initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+	if (!initiator_str) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" initiator_str from Transport ID\n");
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+		"port" : "device", initiator_str, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action specifies a TransportID that is the same as the initiator port
+	 * of the I_T nexus for the command received, then the command shall
+	 * be terminated with CHECK CONDITION status, with the sense key set to
+	 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+	 * IN PARAMETER LIST.
+	 */
+	pr_reg_nacl = pr_reg->pr_reg_nacl;
+	matching_iname = (!strcmp(initiator_str,
+				  pr_reg_nacl->initiatorname)) ? 1 : 0;
+	if (!matching_iname)
+		goto after_iport_check;
+
+	if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+			" matches: %s on received I_T Nexus\n", initiator_str,
+			pr_reg_nacl->initiatorname);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+			" matches: %s %s on received I_T Nexus\n",
+			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+			pr_reg->pr_reg_isid);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+after_iport_check:
+	/*
+	 * Locate the destination struct se_node_acl from the received Transport ID
+	 */
+	spin_lock_irq(&dest_se_tpg->acl_node_lock);
+	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+				initiator_str);
+	if (dest_node_acl) {
+		atomic_inc(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_inc();
+	}
+	spin_unlock_irq(&dest_se_tpg->acl_node_lock);
+
+	if (!dest_node_acl) {
+		pr_err("Unable to locate %s dest_node_acl for"
+			" TransportID%s\n", dest_tf_ops->get_fabric_name(),
+			initiator_str);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+	if (ret != 0) {
+		pr_err("core_scsi3_nodeacl_depend_item() for"
+			" dest_node_acl\n");
+		atomic_dec(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_node_acl = NULL;
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+#if 0
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname);
+#endif
+	/*
+	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+	 * PORT IDENTIFIER.
+	 */
+	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+	if (!dest_se_deve) {
+		pr_err("Unable to locate %s dest_se_deve from RTPI:"
+			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+	if (ret < 0) {
+		pr_err("core_scsi3_lunacl_depend_item() failed\n");
+		atomic_dec(&dest_se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_se_deve = NULL;
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = -EINVAL;
+		goto out;
+	}
+#if 0
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+		" ACL for dest_se_deve->mapped_lun: %u\n",
+		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+		dest_se_deve->mapped_lun);
+#endif
+	/*
+	 * A persistent reservation needs to already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!pr_res_holder) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
+			" currently held\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * The I_T nexus on which the command was received must be the
+	 * reservation holder.
+	 *
+	 * From spc4r17 section 5.7.8  Table 50 --
+	 * 	Register behaviors for a REGISTER AND MOVE service action
+	 */
+	if (pr_res_holder != pr_reg) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+			" Nexus is not reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * From spc4r17 section 5.7.8: registering and moving reservation
+	 *
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action is received and the established persistent reservation is a
+	 * Write Exclusive - All Registrants type or Exclusive Access -
+	 * All Registrants type reservation, then the command shall be completed
+	 * with RESERVATION CONFLICT status.
+	 */
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+			" reservation for type: %s\n",
+			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+		spin_unlock(&dev->dev_reservation_lock);
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out;
+	}
+	pr_res_nacl = pr_res_holder->pr_reg_nacl;
+	/*
+	 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+	 */
+	type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+	/*
+	 * c) Associate the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field with the I_T nexus specified as the
+	 *    destination of the register and move, where:
+	 *    A) The I_T nexus is specified by the TransportID and the
+	 *	 RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+	 *    B) Regardless of the TransportID format used, the association for
+	 *       the initiator port is based on either the initiator port name
+	 *       (see 3.1.71) on SCSI transport protocols where port names are
+	 *       required or the initiator port identifier (see 3.1.70) on SCSI
+	 *       transport protocols where port names are not required;
+	 * d) Register the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field;
+	 * e) Retain the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field and associated information;
+	 *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+	 * register an I_T nexus that is already registered with the same
+	 * reservation key or a different reservation key.
+	 */
+	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+	if (!dest_pr_reg) {
+		ret = core_scsi3_alloc_registration(cmd->se_dev,
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, 0, aptpl, 2, 1);
+		if (ret != 0) {
+			spin_unlock(&dev->dev_reservation_lock);
+			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+			ret = -EINVAL;
+			goto out;
+		}
+		dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+						iport_ptr);
+		new_reg = 1;
+	}
+	/*
+	 * f) Release the persistent reservation for the persistent reservation
+	 *    holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command was received);
+	 */
+	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			dev->dev_pr_res_holder, 0);
+	/*
+	 * g) Move the persistent reservation to the specified I_T nexus using
+	 *    the same scope and type as the persistent reservation released in
+	 *    item f); and
+	 */
+	dev->dev_pr_res_holder = dest_pr_reg;
+	dest_pr_reg->pr_res_holder = 1;
+	dest_pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Increment PRGeneration for existing registrations..
+	 */
+	if (!new_reg)
+		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+		" created new reservation holder TYPE: %s on object RTPI:"
+		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+		core_scsi3_pr_dump_type(type), rtpi,
+		dest_pr_reg->pr_res_generation);
+	pr_debug("SPC-3 PR Successfully moved reservation from"
+		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * It is now safe to release configfs group dependencies for destination
+	 * of Transport ID Initiator Device/Port Identifier
+	 */
+	core_scsi3_lunacl_undepend_item(dest_se_deve);
+	core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	/*
+	 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+	 * nexus on which PERSISTENT RESERVE OUT command was received.
+	 */
+	if (unreg) {
+		spin_lock(&pr_tmpl->registration_lock);
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+		spin_unlock(&pr_tmpl->registration_lock);
+	} else
+		core_scsi3_put_pr_reg(pr_reg);
+
+	/*
+	 * Clear the APTPL metadata if APTPL has been disabled, otherwise
+	 * write out the updated metadata to struct file for this SCSI device.
+	 */
+	if (!aptpl) {
+		pr_tmpl->pr_aptpl_active = 0;
+		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+		pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
+				" REGISTER_AND_MOVE\n");
+	} else {
+		pr_tmpl->pr_aptpl_active = 1;
+		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+				&dest_pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!ret)
+			pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
+					" REGISTER_AND_MOVE\n");
+	}
+
+	transport_kunmap_data_sg(cmd);
+
+	core_scsi3_put_pr_reg(dest_pr_reg);
+	return 0;
+out:
+	if (buf)
+		transport_kunmap_data_sg(cmd);
+	if (dest_se_deve)
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+	if (dest_node_acl)
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
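+/*
+ * Build the 64-bit RESERVATION KEY from 8 big-endian bytes of the PR OUT
+ * parameter data: bytes 0-3 form the upper 32 bits and bytes 4-7 the lower
+ * 32 bits of the returned value.
+ */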
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+	__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+int target_scsi3_emulate_pr_out(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	unsigned char *cdb = &cmd->t_task_cdb[0];
+	unsigned char *buf;
+	u64 res_key, sa_res_key;
+	int sa, scope, type, aptpl;
+	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+	int ret;
+
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+		pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+	 */
+	if (!cmd->se_sess) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (cmd->data_length < 24) {
+		pr_warn("SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+	 */
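+	/*
+	 * SERVICE ACTION is the low 5 bits of CDB byte 1; SCOPE occupies the
+	 * upper nibble and TYPE the lower nibble of CDB byte 2.
+	 */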
+	sa = (cdb[1] & 0x1f);
+	scope = (cdb[2] & 0xf0);
+	type = (cdb[2] & 0x0f);
+
+	buf = transport_kmap_data_sg(cmd);
+	/*
+	 * From PERSISTENT_RESERVE_OUT parameter list (payload)
+	 */
+	res_key = core_scsi3_extract_reservation_key(&buf[0]);
+	sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+	/*
+	 * REGISTER_AND_MOVE uses a different SA parameter list containing
+	 * SCSI TransportIDs.
+	 */
+	if (sa != PRO_REGISTER_AND_MOVE) {
+		spec_i_pt = (buf[20] & 0x08);
+		all_tg_pt = (buf[20] & 0x04);
+		aptpl = (buf[20] & 0x01);
+	} else {
+		aptpl = (buf[17] & 0x01);
+		unreg = (buf[17] & 0x02);
+	}
+	transport_kunmap_data_sg(cmd);
+	buf = NULL;
+
+	/*
+	 * SPEC_I_PT=1 is only valid for Service action: REGISTER
+	 */
+	if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * From spc4r17 section 6.14:
+	 *
+	 * If the SPEC_I_PT bit is set to zero, the service action is not
+	 * REGISTER AND MOVE, and the parameter list length is not 24, then
+	 * the command shall be terminated with CHECK CONDITION status, with
+	 * the sense key set to ILLEGAL REQUEST, and the additional sense
+	 * code set to PARAMETER LIST LENGTH ERROR.
+	 */
+	if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	    (cmd->data_length != 24)) {
+		pr_warn("SPC-PR: Received PR OUT illegal parameter"
+			" list length: %u\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * core_scsi3_emulate_pro_* function parameters
+	 * are defined by spc4r17 Table 174:
+	 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+	 */
+	switch (sa) {
+	case PRO_REGISTER:
+		ret = core_scsi3_emulate_pro_register(cmd,
+			res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+		break;
+	case PRO_RESERVE:
+		ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
+		break;
+	case PRO_RELEASE:
+		ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
+		break;
+	case PRO_CLEAR:
+		ret = core_scsi3_emulate_pro_clear(cmd, res_key);
+		break;
+	case PRO_PREEMPT:
+		ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 0);
+		break;
+	case PRO_PREEMPT_AND_ABORT:
+		ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 1);
+		break;
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		ret = core_scsi3_emulate_pro_register(cmd,
+			0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+		break;
+	case PRO_REGISTER_AND_MOVE:
+		ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+				sa_res_key, aptpl, unreg);
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_OUT service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf;
+	u32 add_len = 0, off = 8;
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
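+	/*
+	 * PRGENERATION is returned in bytes 0-3 of the READ_KEYS parameter
+	 * data, followed by the 4-byte ADDITIONAL LENGTH and the list of
+	 * 8-byte reservation keys filled in below.
+	 */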
+	buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&su_dev->t10_pr.registration_lock);
+	list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+			pr_reg_list) {
+		/*
+		 * Check for overflow of 8-byte PRI READ_KEYS payload and
+		 * next reservation key list descriptor.
+		 */
+		if ((add_len + 8) > (cmd->data_length - 8))
+			break;
+
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+		add_len += 8;
+	}
+	spin_unlock(&su_dev->t10_pr.registration_lock);
+
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf;
+	u64 pr_res_key;
+	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&se_dev->dev_reservation_lock);
+	pr_reg = se_dev->dev_pr_res_holder;
+	if ((pr_reg)) {
+		/*
+		 * Set the hardcoded Additional Length
+		 */
+		buf[4] = ((add_len >> 24) & 0xff);
+		buf[5] = ((add_len >> 16) & 0xff);
+		buf[6] = ((add_len >> 8) & 0xff);
+		buf[7] = (add_len & 0xff);
+
+		if (cmd->data_length < 22)
+			goto err;
+
+		/*
+		 * Set the Reservation key.
+		 *
+		 * From spc4r17, section 5.7.10:
+		 * A persistent reservation holder has its reservation key
+		 * returned in the parameter data from a PERSISTENT
+		 * RESERVE IN command with READ RESERVATION service action as
+		 * follows:
+		 * a) For a persistent reservation of the type Write Exclusive
+		 *    - All Registrants or Exclusive Access - All Registrants,
+		 *      the reservation key shall be set to zero; or
+		 * b) For all other persistent reservation types, the
+		 *    reservation key shall be set to the registered
+		 *    reservation key for the I_T nexus that holds the
+		 *    persistent reservation.
+		 */
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+			pr_res_key = 0;
+		else
+			pr_res_key = pr_reg->pr_res_key;
+
+		buf[8] = ((pr_res_key >> 56) & 0xff);
+		buf[9] = ((pr_res_key >> 48) & 0xff);
+		buf[10] = ((pr_res_key >> 40) & 0xff);
+		buf[11] = ((pr_res_key >> 32) & 0xff);
+		buf[12] = ((pr_res_key >> 24) & 0xff);
+		buf[13] = ((pr_res_key >> 16) & 0xff);
+		buf[14] = ((pr_res_key >> 8) & 0xff);
+		buf[15] = (pr_res_key & 0xff);
+		/*
+		 * Set the SCOPE and TYPE
+		 */
+		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+			  (pr_reg->pr_res_type & 0x0f);
+	}
+
+err:
+	spin_unlock(&se_dev->dev_reservation_lock);
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+	unsigned char *buf;
+	u16 add_len = 8; /* Hardcoded to 8. */
+
+	if (cmd->data_length < 6) {
+		pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+			" %u too small\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+
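+	/*
+	 * ADDITIONAL LENGTH is a 16-bit big-endian field in bytes 0-1,
+	 * hardcoded to 8 for REPORT CAPABILITIES.
+	 */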
+	buf[0] = ((add_len >> 8) & 0xff);
+	buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+	buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Task Mask Valid bit.
+	 */
+	buf[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit
+	 */
+	if (pr_tmpl->pr_aptpl_active)
+		buf[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+	 */
+	buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+	buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct se_node_acl *se_nacl;
+	struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+	unsigned char *buf;
+	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+	u32 off = 8; /* off into first Full Status descriptor */
+	int format_code = 0;
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+
+	buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+	buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+	buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+	buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		se_nacl = pr_reg->pr_reg_nacl;
+		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+		add_desc_len = 0;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Determine expected length of $FABRIC_MOD specific
+		 * TransportID full status descriptor..
+		 */
+		exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
+				se_tpg, se_nacl, pr_reg, &format_code);
+
+		if ((exp_desc_len + add_len) > cmd->data_length) {
+			pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
+				" out of buffer: %d\n", cmd->data_length);
+			spin_lock(&pr_tmpl->registration_lock);
+			atomic_dec(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_dec();
+			break;
+		}
+		/*
+		 * Set RESERVATION KEY
+		 */
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+		off += 4; /* Skip Over Reserved area */
+
+		/*
+		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt)
+			buf[off] = 0x02;
+		/*
+		 * The struct se_lun pointer will be present for the
+		 * reservation holder for PR_HOLDER bit.
+		 *
+		 * Also, if this registration is the reservation
+		 * holder, fill in SCOPE and TYPE in the next byte.
+		 */
+		if (pr_reg->pr_res_holder) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+				     (pr_reg->pr_res_type & 0x0f);
+		} else
+			off += 2;
+
+		off += 4; /* Skip over reserved area */
+		/*
+		 * From spc4r17 6.3.15:
+		 *
+		 * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT
+		 * IDENTIFIER field contains the relative port identifier (see
+		 * 3.1.120) of the target port that is part of the I_T nexus
+		 * described by this full status descriptor. If the ALL_TG_PT
+		 * bit is set to one, the contents of the RELATIVE TARGET PORT
+		 * IDENTIFIER field are not defined by this standard.
+		 */
+		if (!pr_reg->pr_reg_all_tg_pt) {
+			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+		} else
+			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFER */
+
+		/*
+		 * Now, have the $FABRIC_MOD fill in the protocol identifier
+		 */
+		desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
+				se_nacl, pr_reg, &format_code, &buf[off+4]);
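+		/*
+		 * The fabric wrote the TransportID at off + 4, leaving room for
+		 * the 4-byte ADDITIONAL DESCRIPTOR LENGTH filled in below before
+		 * off is advanced past the TransportID.
+		 */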
+
+		spin_lock(&pr_tmpl->registration_lock);
+		atomic_dec(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_dec();
+		/*
+		 * Set the ADDITIONAL DESCRIPTOR LENGTH
+		 */
+		buf[off++] = ((desc_len >> 24) & 0xff);
+		buf[off++] = ((desc_len >> 16) & 0xff);
+		buf[off++] = ((desc_len >> 8) & 0xff);
+		buf[off++] = (desc_len & 0xff);
+		/*
+		 * Size of the full status descriptor header minus the TransportID
+		 * containing ($FABRIC_MOD specific) initiator device/port
+		 * WWN information.
+		 *
+		 *  See spc4r17 Section 6.13.5 Table 169
+		 */
+		add_desc_len = (24 + desc_len);
+
+		off += desc_len;
+		add_len += add_desc_len;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Set ADDITIONAL_LENGTH
+	 */
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	transport_kunmap_data_sg(cmd);
+
+	return 0;
+}
+
+int target_scsi3_emulate_pr_in(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	int ret;
+
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+		pr_err("Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+		return -EINVAL;
+	}
+
+	switch (cmd->t_task_cdb[1] & 0x1f) {
+	case PRI_READ_KEYS:
+		ret = core_scsi3_pri_read_keys(cmd);
+		break;
+	case PRI_READ_RESERVATION:
+		ret = core_scsi3_pri_read_reservation(cmd);
+		break;
+	case PRI_REPORT_CAPABILITIES:
+		ret = core_scsi3_pri_report_capabilities(cmd);
+		break;
+	case PRI_READ_FULL_STATUS:
+		ret = core_scsi3_pri_read_full_status(cmd);
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_IN service"
+			" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return ret;
+}
+
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+	return 0;
+}
+
+static int core_pt_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_reservation *rest = &su_dev->t10_pr;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the reservations
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate reservations themselves.
+	 */
+	if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
+		rest->res_type = SPC_PASSTHROUGH;
+		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+		pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
+			" emulation\n", dev->transport->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated Persistent Reservations.
+	 */
+	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+		pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
+			" emulation\n", dev->transport->name);
+	} else {
+		rest->res_type = SPC2_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+		rest->pr_ops.t10_seq_non_holder =
+				&core_scsi2_reservation_seq_non_holder;
+		pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
+			dev->transport->name);
+	}
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.h
new file mode 100644
index 0000000..7a233fe
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pr.h
@@ -0,0 +1,68 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER				0x00
+#define PRO_RESERVE				0x01
+#define PRO_RELEASE				0x02
+#define PRO_CLEAR				0x03
+#define PRO_PREEMPT				0x04
+#define PRO_PREEMPT_AND_ABORT			0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
+#define PRO_REGISTER_AND_MOVE			0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS				0x00
+#define PRI_READ_RESERVATION			0x01
+#define PRI_REPORT_CAPABILITIES			0x02
+#define PRI_READ_FULL_STATUS			0x03
+/*
+ * PERSISTENT_RESERVE_ SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE			0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE			0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08
+
+#define PR_APTPL_MAX_IPORT_LEN			256
+#define PR_APTPL_MAX_TPORT_LEN			256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+			char *, u32);
+extern int target_scsi2_reservation_release(struct se_task *task);
+extern int target_scsi2_reservation_reserve(struct se_task *task);
+extern int core_scsi3_alloc_aptpl_registration(
+			struct t10_reservation *, u64,
+			unsigned char *, unsigned char *, u32,
+			unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+			struct se_portal_group *, struct se_lun *,
+			struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+					     struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+
+extern int target_scsi3_emulate_pr_in(struct se_task *task);
+extern int target_scsi3_emulate_pr_out(struct se_task *task);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000..ec7e71c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1267 @@
+/*******************************************************************************
+ * Filename:  target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/*	pscsi_attach_hba():
+ *
+ * 	pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
+ *	from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct pscsi_hba_virt *phv;
+
+	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+	if (!phv) {
+		pr_err("Unable to allocate struct pscsi_hba_virt\n");
+		return -ENOMEM;
+	}
+	phv->phv_host_id = host_id;
+	phv->phv_mode = PHV_VIRTUAL_HOST_ID;
+
+	hba->hba_ptr = phv;
+
+	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+	       hba->hba_id);
+
+	return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+	if (scsi_host) {
+		scsi_host_put(scsi_host);
+
+		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
+			" Generic Target Core\n", hba->hba_id,
+			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+			"Unknown");
+	} else
+		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
+			" from Generic Target Core\n", hba->hba_id);
+
+	kfree(phv);
+	hba->hba_ptr = NULL;
+}
+
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	/*
+	 * Release the struct Scsi_Host
+	 */
+	if (!mode_flag) {
+		if (!sh)
+			return 0;
+
+		phv->phv_lld_host = NULL;
+		phv->phv_mode = PHV_VIRTUAL_HOST_ID;
+
+		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+			" %s\n", hba->hba_id, (sh->hostt->name) ?
+			(sh->hostt->name) : "Unknown");
+
+		scsi_host_put(sh);
+		return 0;
+	}
+	/*
+	 * Otherwise, locate struct Scsi_Host from the original passed
+	 * pSCSI Host ID and enable for phba mode
+	 */
+	sh = scsi_host_lookup(phv->phv_host_id);
+	if (!sh) {
+		pr_err("pSCSI: Unable to locate SCSI Host for"
+			" phv_host_id: %d\n", phv->phv_host_id);
+		return -EINVAL;
+	}
+
+	phv->phv_lld_host = sh;
+	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+	return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+		struct scsi_device *sdev)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(12, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = MODE_SENSE;
+	cdb[4] = 0x0c; /* 12 bytes */
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+			HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
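+	/*
+	 * The returned mode data's block descriptor carries BLOCK LENGTH in
+	 * bytes 9-11 (big-endian), which becomes the tape's sector_size.
+	 */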
+	/*
+	 * If MODE_SENSE still returns zero, set the default value to 1024.
+	 */
+	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+	if (!sdev->sector_size)
+		sdev->sector_size = 1024;
+out_free:
+	kfree(buf);
+}
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char *buf;
+
+	if (sdev->inquiry_len < INQUIRY_LEN)
+		return;
+
+	buf = sdev->inquiry;
+	if (!buf)
+		return;
+	/*
+	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+	 */
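+	/*
+	 * Standard INQUIRY data layout: T10 VENDOR IDENTIFICATION at bytes
+	 * 8-15, PRODUCT IDENTIFICATION at bytes 16-31, and PRODUCT REVISION
+	 * LEVEL at bytes 32-35.
+	 */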
+	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x80; /* Unit Serial Number */
+	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+	kfree(buf);
+	return 0;
+
+out_free:
+	kfree(buf);
+	return -EPERM;
+}
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+		struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+	int ident_len, page_len, off = 4, ret;
+	struct t10_vpd *vpd;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x83; /* Device Identifier */
+	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+			      NULL, HZ, 1, NULL);
+	if (ret)
+		goto out;
+
+	page_len = (buf[2] << 8) | buf[3];
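+	/*
+	 * Walk the VPD page 0x83 Identification Descriptor list: each entry is
+	 * a 4-byte header followed by ident_len bytes of identifier, hence the
+	 * (ident_len + 4) stride used below.
+	 */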
+	while (page_len > 0) {
+		/* Grab a pointer to the Identification descriptor */
+		page_83 = &buf[off];
+		ident_len = page_83[3];
+		if (!ident_len) {
+			pr_err("page_83[3]: identifier"
+					" length zero!\n");
+			break;
+		}
+		pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
+
+		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+		if (!vpd) {
+			pr_err("Unable to allocate memory for"
+					" struct t10_vpd\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&vpd->vpd_list);
+
+		transport_set_vpd_proto_id(vpd, page_83);
+		transport_set_vpd_assoc(vpd, page_83);
+
+		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+		if (transport_set_vpd_ident(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+
+		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+		off += (ident_len + 4);
+		page_len -= (ident_len + 4);
+	}
+
+out:
+	kfree(buf);
+}
+
+/*	pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	struct pscsi_dev_virt *pdv,
+	struct scsi_device *sd,
+	int dev_flags)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct request_queue *q;
+	struct queue_limits *limits;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (!sd->queue_depth) {
+		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+		pr_err("Set broken SCSI Device %d:%d:%d"
+			" queue_depth to %d\n", sd->channel, sd->id,
+				sd->lun, sd->queue_depth);
+	}
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = sd->request_queue;
+	limits = &dev_limits.limits;
+	limits->logical_block_size = sd->sector_size;
+	limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+	limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
+	dev_limits.hw_queue_depth = sd->queue_depth;
+	dev_limits.queue_depth = sd->queue_depth;
+	/*
+	 * Setup our standard INQUIRY info into se_dev->t10_wwn
+	 */
+	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+	/*
+	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
+	 * which has already been referenced with Linux SCSI code with
+	 * scsi_device_get() in this file's pscsi_create_virtdevice().
+	 *
+	 * The passthrough operations called by the transport_add_device_*
+	 * function below will require this pointer to be set for passthrough
+	 * ops.
+	 *
+	 * For the shutdown case in pscsi_free_device(), this struct
+	 * scsi_device  reference is released with Linux SCSI code
+	 * scsi_device_put() and the pdv->pdv_sd cleared.
+	 */
+	pdv->pdv_sd = sd;
+	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+				se_dev, dev_flags, pdv,
+				&dev_limits, NULL, NULL);
+	if (!dev) {
+		pdv->pdv_sd = NULL;
+		return NULL;
+	}
+
+	/*
+	 * Locate VPD WWN Information used for various purposes within
+	 * the Storage Engine.
+	 */
+	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+		/*
+		 * If VPD Unit Serial returned GOOD status, try
+		 * VPD Device Identification page (0x83).
+		 */
+		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+	}
+
+	/*
+	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+	 */
+	if (sd->type == TYPE_TAPE)
+		pscsi_tape_read_blocksize(dev, sd);
+	return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct pscsi_dev_virt *pdv;
+
+	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+	if (!pdv) {
+		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
+		return NULL;
+	}
+	pdv->pdv_se_hba = hba;
+
+	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	return pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+	__releases(sh->host_lock)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	struct block_device *bd;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+	/*
+	 * Claim exclusive struct block_device access to struct scsi_device
+	 * for TYPE_DISK using supplied udev_path
+	 */
+	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+	if (IS_ERR(bd)) {
+		pr_err("pSCSI: blkdev_get_by_path() failed\n");
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pdv->pdv_bd = bd;
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!dev) {
+		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+	__releases(sh->host_lock)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!dev) {
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+	__releases(sh->host_lock)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	spin_unlock_irq(sh->host_lock);
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!dev)
+		return NULL;
+
+	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct pscsi_dev_virt *pdv = p;
+	struct se_device *dev;
+	struct scsi_device *sd;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int legacy_mode_enable = 0;
+
+	if (!pdv) {
+		pr_err("Unable to locate struct pscsi_dev_virt"
+				" parameter\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+	 */
+	if (!sh) {
+		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+			pr_err("pSCSI: Unable to locate struct"
+				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+			return ERR_PTR(-ENODEV);
+		}
+		/*
+		 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
+		 * reference, we enforce that udev_path has been set
+		 */
+		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+			pr_err("pSCSI: udev_path attribute has not"
+				" been set before ENABLE=1\n");
+			return ERR_PTR(-EINVAL);
+		}
+		/*
+		 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
+		 * use the original TCM hba ID to reference Linux/SCSI Host No
+		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
+		 */
+		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+			spin_lock(&hba->device_lock);
+			if (!list_empty(&hba->hba_dev_list)) {
+				pr_err("pSCSI: Unable to set hba_mode"
+					" with active devices\n");
+				spin_unlock(&hba->device_lock);
+				return ERR_PTR(-EEXIST);
+			}
+			spin_unlock(&hba->device_lock);
+
+			if (pscsi_pmode_enable_hba(hba, 1) != 1)
+				return ERR_PTR(-ENODEV);
+
+			legacy_mode_enable = 1;
+			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+			sh = phv->phv_lld_host;
+		} else {
+			sh = scsi_host_lookup(pdv->pdv_host_id);
+			if (!sh) {
+				pr_err("pSCSI: Unable to locate"
+					" pdv_host_id: %d\n", pdv->pdv_host_id);
+				return ERR_PTR(-EINVAL);
+			}
+			pdv->pdv_lld_host = sh;
+		}
+	} else {
+		if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
+			pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
+				" struct Scsi_Host exists\n");
+			return ERR_PTR(-EEXIST);
+		}
+	}
+
+	spin_lock_irq(sh->host_lock);
+	list_for_each_entry(sd, &sh->__devices, siblings) {
+		if ((pdv->pdv_channel_id != sd->channel) ||
+		    (pdv->pdv_target_id != sd->id) ||
+		    (pdv->pdv_lun_id != sd->lun))
+			continue;
+		/*
+		 * Functions will release the held struct scsi_host->host_lock
+		 * before calling pscsi_add_device_to_list() to register
+		 * struct scsi_device with target_core_mod.
+		 */
+		switch (sd->type) {
+		case TYPE_DISK:
+			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+			break;
+		case TYPE_ROM:
+			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+			break;
+		default:
+			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+			break;
+		}
+
+		if (!dev) {
+			if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+				scsi_host_put(sh);
+			else if (legacy_mode_enable) {
+				pscsi_pmode_enable_hba(hba, 0);
+				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+			}
+			pdv->pdv_sd = NULL;
+			return ERR_PTR(-ENODEV);
+		}
+		return dev;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
+
+	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+		scsi_host_put(sh);
+	else if (legacy_mode_enable) {
+		pscsi_pmode_enable_hba(hba, 0);
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+	struct pscsi_dev_virt *pdv = p;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	if (sd) {
+		/*
+		 * Release exclusive pSCSI internal struct block_device claim for
+		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+		 */
+		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+			blkdev_put(pdv->pdv_bd,
+				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+			pdv->pdv_bd = NULL;
+		}
+		/*
+		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+		 * to struct Scsi_Host now.
+		 */
+		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+		    (phv->phv_lld_host != NULL))
+			scsi_host_put(phv->phv_lld_host);
+		else if (pdv->pdv_lld_host)
+			scsi_host_put(pdv->pdv_lld_host);
+
+		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+			scsi_device_put(sd);
+
+		pdv->pdv_sd = NULL;
+	}
+
+	kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+	return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/*	pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	int result;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	unsigned char *cdb = &pt->pscsi_cdb[0];
+
+	result = pt->pscsi_result;
+	/*
+	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+	 * forced.
+	 */
+	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		if (!task->task_se_cmd->se_deve)
+			goto after_mode_sense;
+
+		if (task->task_se_cmd->se_deve->lun_flags &
+				TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
+
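+			/*
+			 * Set the WP (write protect) bit, bit 7 of the
+			 * DEVICE-SPECIFIC PARAMETER byte: byte 3 of the mode
+			 * parameter header for MODE SENSE(10), byte 2 for
+			 * MODE SENSE(6).
+			 */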
+			if (cdb[0] == MODE_SENSE_10) {
+				if (!(buf[3] & 0x80))
+					buf[3] |= 0x80;
+			} else {
+				if (!(buf[2] & 0x80))
+					buf[2] |= 0x80;
+			}
+
+			transport_kunmap_data_sg(task->task_se_cmd);
+		}
+	}
+after_mode_sense:
+
+	if (sd->type != TYPE_TAPE)
+		goto after_mode_select;
+
+	/*
+	 * Hack to correctly obtain the initiator requested blocksize for
+	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
+	 * struct scsi_device->sector_size will not contain the correct value
+	 * by default, so we go ahead and set it so
+	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+	 * storage engine.
+	 */
+	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		unsigned char *buf;
+		struct scatterlist *sg = task->task_sg;
+		u16 bdl;
+		u32 blocksize;
+
+		buf = sg_virt(&sg[0]);
+		if (!buf) {
+			pr_err("Unable to get buf for scatterlist\n");
+			goto after_mode_select;
+		}
+
+		if (cdb[0] == MODE_SELECT)
+			bdl = (buf[3]);
+		else
+			bdl = (buf[6] << 8) | (buf[7]);
+
+		if (!bdl)
+			goto after_mode_select;
+
+		if (cdb[0] == MODE_SELECT)
+			blocksize = (buf[9] << 16) | (buf[10] << 8) |
+					(buf[11]);
+		else
+			blocksize = (buf[13] << 16) | (buf[14] << 8) |
+					(buf[15]);
+
+		sd->sector_size = blocksize;
+	}
+after_mode_select:
+
+	if (status_byte(result) & CHECK_CONDITION)
+		return 1;
+
+	return 0;
+}
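+/*
+ * Illustrative note (not part of the original driver code): the MODE SELECT
+ * handling above reads the 3-byte BLOCK LENGTH field of the first block
+ * descriptor.  For a hypothetical MODE SELECT(6) parameter list whose bytes
+ * 9..11 are 0x00 0x02 0x00, the computed blocksize is
+ *
+ *	(0x00 << 16) | (0x02 << 8) | 0x00 = 512
+ *
+ * and sd->sector_size is updated to 512 for the TYPE_TAPE device.
+ */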
+
+static struct se_task *
+pscsi_alloc_task(unsigned char *cdb)
+{
+	struct pscsi_plugin_task *pt;
+
+	/*
+	 * Dynamically alloc cdb space, since it may be larger than
+	 * TCM_MAX_COMMAND_SIZE
+	 */
+	pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
+	if (!pt) {
+		pr_err("Unable to allocate struct pscsi_plugin_task\n");
+		return NULL;
+	}
+
+	return &pt->pscsi_task;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	/*
+	 * We do not release the bio(s) here associated with this task, as
+	 * this is handled by bio_put() and pscsi_bi_endio().
+	 */
+	kfree(pt);
+}
+
+enum {
+	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+	Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_scsi_host_id, "scsi_host_id=%d"},
+	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
+	{Opt_scsi_target_id, "scsi_target_id=%d"},
+	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_scsi_host_id:
+			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+				pr_err("PSCSI[%d]: Unable to accept"
+					" scsi_host_id while phv_mode =="
+					" PHV_LLD_SCSI_HOST_NO\n",
+					phv->phv_host_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			match_int(args, &arg);
+			pdv->pdv_host_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
+			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+			break;
+		case Opt_scsi_channel_id:
+			match_int(args, &arg);
+			pdv->pdv_channel_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
+				" ID: %d\n",  phv->phv_host_id,
+				pdv->pdv_channel_id);
+			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+			break;
+		case Opt_scsi_target_id:
+			match_int(args, &arg);
+			pdv->pdv_target_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI Target"
+				" ID: %d\n", phv->phv_host_id,
+				pdv->pdv_target_id);
+			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+			break;
+		case Opt_scsi_lun_id:
+			match_int(args, &arg);
+			pdv->pdv_lun_id = arg;
+			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+			pdv->pdv_flags |= PDF_HAS_LUN_ID;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
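+/*
+ * Usage sketch (assumption, not part of this file): the control string parsed
+ * above is a comma/newline separated list of the tokens defined in the
+ * match_table_t, written through configfs from userspace, e.g. with
+ * hypothetical IDs and path:
+ *
+ *   echo "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" \
+ *       > /sys/kernel/config/target/core/pscsi_0/pdev/control
+ *
+ * which fills pdv_host_id/pdv_channel_id/pdv_target_id/pdv_lun_id and sets the
+ * PDF_HAS_* flags later checked by pscsi_check_configfs_dev_params().
+ */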
+
+static ssize_t pscsi_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+					      struct se_subsystem_dev *se_dev,
+					      char *b)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	unsigned char host_id[16];
+	ssize_t bl;
+	int i;
+
+	if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
+		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+	else
+		snprintf(host_id, 16, "PHBA Mode");
+
+	bl = sprintf(b, "SCSI Device Bus Location:"
+		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+		host_id);
+
+	if (sd) {
+		bl += sprintf(b + bl, "        ");
+		bl += sprintf(b + bl, "Vendor: ");
+		for (i = 0; i < 8; i++) {
+			if (ISPRINT(sd->vendor[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->vendor[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Model: ");
+		for (i = 0; i < 16; i++) {
+			if (ISPRINT(sd->model[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->model[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Rev: ");
+		for (i = 0; i < 4; i++) {
+			if (ISPRINT(sd->rev[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->rev[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	return bl;
+}
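+/*
+ * Example of the buffer built above (hypothetical device values; the INQUIRY
+ * vendor/model/rev strings are padded to 8/16/4 characters and non-printable
+ * bytes are replaced with spaces):
+ *
+ *   SCSI Device Bus Location: Channel ID: 0 Target ID: 1 LUN: 0 Host ID: 2
+ *           Vendor: ATA      Model: SAMPLE DISK      Rev: 1.0
+ */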
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(int sg_num)
+{
+	struct bio *bio;
+	/*
+	 * Use bio_kmalloc() following the comment for bio -> struct request
+	 * in block/blk-core.c:blk_make_request()
+	 */
+	bio = bio_kmalloc(GFP_KERNEL, sg_num);
+	if (!bio) {
+		pr_err("PSCSI: bio_kmalloc() failed\n");
+		return NULL;
+	}
+	bio->bi_end_io = pscsi_bi_endio;
+
+	return bio;
+}
+
+static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
+		struct bio **hbio)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+	u32 task_sg_num = task->task_sg_nents;
+	struct bio *bio = NULL, *tbio = NULL;
+	struct page *page;
+	struct scatterlist *sg;
+	u32 data_len = task->task_size, i, len, bytes, off;
+	int nr_pages = (task->task_size + task_sg[0].offset +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+	int nr_vecs = 0, rc;
+	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
+	*hbio = NULL;
+
+	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
+
+	for_each_sg(task_sg, sg, task_sg_num, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+			page, len, off);
+
+		while (len > 0 && data_len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			if (!bio) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+				/*
+				 * Calls bio_kmalloc() and sets bio->bi_end_io()
+				 */
+				bio = pscsi_get_bio(nr_vecs);
+				if (!bio)
+					goto fail;
+
+				if (rw)
+					bio->bi_rw |= REQ_WRITE;
+
+				pr_debug("PSCSI: Allocated bio: %p,"
+					" dir: %s nr_vecs: %d\n", bio,
+					(rw) ? "rw" : "r", nr_vecs);
+				/*
+				 * Set *hbio pointer to handle the case:
+				 * nr_pages > BIO_MAX_PAGES, where additional
+				 * bios need to be added to complete a given
+				 * struct se_task
+				 */
+				if (!*hbio)
+					*hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+				" bio: %p page: %p len: %d off: %d\n", i, bio,
+				page, len, off);
+
+			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+					bio, page, bytes, off);
+			if (rc != bytes)
+				goto fail;
+
+			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+				bio->bi_vcnt, nr_vecs);
+
+			if (bio->bi_vcnt > nr_vecs) {
+				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
+					" %d i: %d bio: %p, allocating another"
+					" bio\n", bio->bi_vcnt, i, bio);
+				/*
+				 * Clear the pointer so that another bio will
+				 * be allocated with pscsi_get_bio() above, the
+				 * current bio has already been set *tbio and
+				 * bio->bi_next.
+				 */
+				bio = NULL;
+			}
+
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+		}
+	}
+
+	return task->task_sg_nents;
+fail:
+	while (*hbio) {
+		bio = *hbio;
+		*hbio = (*hbio)->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, 0);	/* XXX: should be error */
+	}
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -ENOMEM;
+}
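+/*
+ * Worked example for the nr_pages computation above (values are hypothetical,
+ * PAGE_SIZE assumed to be 4096): a task with task_size = 12288 bytes whose
+ * first scatterlist entry starts at offset 512 needs
+ *
+ *	nr_pages = (12288 + 512 + 4095) >> 12 = 4
+ *
+ * backing pages, so up to four bio_vecs are reserved for the first bio (capped
+ * at BIO_MAX_PAGES) and additional bios are chained via *hbio once a bio's
+ * vector count is exhausted.
+ */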
+
+static int pscsi_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct request *req;
+	struct bio *hbio;
+	int ret;
+
+	target_get_task_cdb(task, pt->pscsi_cdb);
+
+	if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+		req = blk_get_request(pdv->pdv_sd->request_queue,
+				(task->task_data_direction == DMA_TO_DEVICE),
+				GFP_KERNEL);
+		if (!req || IS_ERR(req)) {
+			pr_err("PSCSI: blk_get_request() failed: %ld\n",
+					req ? PTR_ERR(req) : -ENOMEM);
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -ENODEV;
+		}
+	} else {
+		BUG_ON(!task->task_size);
+
+		/*
+		 * Setup the main struct request for the task->task_sg[] payload
+		 */
+		ret = pscsi_map_sg(task, task->task_sg, &hbio);
+		if (ret < 0) {
+			cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return ret;
+		}
+
+		req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
+				       GFP_KERNEL);
+		if (IS_ERR(req)) {
+			pr_err("pSCSI: blk_make_request() failed\n");
+			goto fail;
+		}
+	}
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->end_io = pscsi_req_done;
+	req->end_io_data = task;
+	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+	req->cmd = &pt->pscsi_cdb[0];
+	req->sense = &pt->pscsi_sense[0];
+	req->sense_len = 0;
+	if (pdv->pdv_sd->type == TYPE_DISK)
+		req->timeout = PS_TIMEOUT_DISK;
+	else
+		req->timeout = PS_TIMEOUT_OTHER;
+	req->retries = PS_RETRY;
+
+	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
+			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
+			pscsi_req_done);
+
+	return 0;
+
+fail:
+	while (hbio) {
+		struct bio *bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, 0);	/* XXX: should be error */
+	}
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -ENOMEM;
+}
+
+/*	pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return pt->pscsi_sense;
+}
+
+/*	pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/*	pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return (sd) ? sd->type : TYPE_NO_LUN;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+		return pdv->pdv_bd->bd_part->nr_sects;
+
+	dump_stack();
+	return 0;
+}
+
+/*	pscsi_process_SAM_status():
+ *
+ *
+ */
+static inline void pscsi_process_SAM_status(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt)
+{
+	task->task_scsi_status = status_byte(pt->pscsi_result);
+	if (task->task_scsi_status) {
+		task->task_scsi_status <<= 1;
+		pr_debug("PSCSI Status Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+	}
+
+	switch (host_byte(pt->pscsi_result)) {
+	case DID_OK:
+		transport_complete_task(task, (!task->task_scsi_status));
+		break;
+	default:
+		pr_debug("PSCSI Host Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_se_cmd->scsi_sense_reason =
+					TCM_UNSUPPORTED_SCSI_OPCODE;
+		transport_complete_task(task, 0);
+		break;
+	}
+}
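+/*
+ * Editorial note: in this kernel status_byte() returns the SCSI status code
+ * shifted right by one bit, so the << 1 above reconstructs the SAM status
+ * value, e.g. a CHECK CONDITION arrives as status_byte() == 0x01 and is
+ * reported to the core as task_scsi_status == 0x02 (SAM_STAT_CHECK_CONDITION).
+ */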
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+	struct se_task *task = req->end_io_data;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	pscsi_process_SAM_status(task, pt);
+	__blk_put_request(req->q, req);
+}
+
+static struct se_subsystem_api pscsi_template = {
+	.name			= "pscsi",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
+	.attach_hba		= pscsi_attach_hba,
+	.detach_hba		= pscsi_detach_hba,
+	.pmode_enable_hba	= pscsi_pmode_enable_hba,
+	.allocate_virtdevice	= pscsi_allocate_virtdevice,
+	.create_virtdevice	= pscsi_create_virtdevice,
+	.free_device		= pscsi_free_device,
+	.transport_complete	= pscsi_transport_complete,
+	.alloc_task		= pscsi_alloc_task,
+	.do_task		= pscsi_do_task,
+	.free_task		= pscsi_free_task,
+	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
+	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
+	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
+	.get_sense_buffer	= pscsi_get_sense_buffer,
+	.get_device_rev		= pscsi_get_device_rev,
+	.get_device_type	= pscsi_get_device_type,
+	.get_blocks		= pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+	return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+	transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.h
new file mode 100644
index 0000000..6c23c94
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_pscsi.h
@@ -0,0 +1,63 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION		"v4.0"
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE	0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH	1
+
+#define PS_RETRY		5
+#define PS_TIMEOUT_DISK		(15*HZ)
+#define PS_TIMEOUT_OTHER	(500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+	struct se_task pscsi_task;
+	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+	int	pscsi_direction;
+	int	pscsi_result;
+	u32	pscsi_resid;
+	unsigned char pscsi_cdb[0];
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID	0x01
+#define PDF_HAS_TARGET_ID	0x02
+#define PDF_HAS_LUN_ID		0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT	0x10
+#define PDF_HAS_VIRT_HOST_ID	0x20
+
+struct pscsi_dev_virt {
+	int	pdv_flags;
+	int	pdv_host_id;
+	int	pdv_channel_id;
+	int	pdv_target_id;
+	int	pdv_lun_id;
+	struct block_device *pdv_bd;
+	struct scsi_device *pdv_sd;
+	struct Scsi_Host *pdv_lld_host;
+	struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+	PHV_VIRTUAL_HOST_ID,
+	PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+	int			phv_host_id;
+	phv_modes_t		phv_mode;
+	struct Scsi_Host	*phv_lld_host;
+} ____cacheline_aligned;
+
+#endif   /*** TARGET_CORE_PSCSI_H ***/
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..fa4e21b
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.c
@@ -0,0 +1,577 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_mcp_template;
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct rd_host *rd_host;
+
+	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+	if (!rd_host) {
+		pr_err("Unable to allocate memory for struct rd_host\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	hba->hba_ptr = rd_host;
+
+	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, RD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if (pg) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (rd_dev->rd_page_count <= 0) {
+		pr_err("Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -EINVAL;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -ENOMEM;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!sg) {
+			pr_err("Unable to allocate scatterlist array"
+				" for struct rd_dev\n");
+			return -ENOMEM;
+		}
+
+		sg_init_table(sg, sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+			if (!pg) {
+				pr_err("Unable to allocate scatterlist"
+					" pages for struct rd_dev_sg_table\n");
+				return -ENOMEM;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
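+/*
+ * Sizing sketch (hypothetical numbers, assuming a 32-byte struct scatterlist):
+ * max_sg_per_table = 65536 / 32 = 2048 entries per table.  For rd_pages=3000
+ * the loop builds sg_tables = (3000 / 2048) + 1 = 2 tables: table 0 covers
+ * pages 0..2047 and table 1 covers pages 2048..2999, each scatterlist entry
+ * backed by one zeroed PAGE_SIZE page.
+ */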
+
+static void *rd_allocate_virtdevice(
+	struct se_hba *hba,
+	const char *name,
+	int rd_direct)
+{
+	struct rd_dev *rd_dev;
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+	if (!rd_dev) {
+		pr_err("Unable to allocate memory for struct rd_dev\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p,
+	int rd_direct)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct rd_dev *rd_dev = p;
+	struct rd_host *rd_host = hba->hba_ptr;
+	int dev_flags = 0, ret;
+	char prod[16], rev[4];
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	ret = rd_build_device_space(rd_dev);
+	if (ret < 0)
+		goto fail;
+
+	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+						RD_MCP_VERSION);
+
+	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba,
+			&rd_mcp_template, se_dev, dev_flags, rd_dev,
+			&dev_limits, prod, rev);
+	if (!dev)
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return ERR_PTR(ret);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+	struct rd_dev *rd_dev = p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+	return container_of(task, struct rd_request, rd_task);
+}
+
+static struct se_task *
+rd_alloc_task(unsigned char *cdb)
+{
+	struct rd_request *rd_req;
+
+	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+	if (!rd_req) {
+		pr_err("Unable to allocate struct rd_request\n");
+		return NULL;
+	}
+
+	return &rd_req->rd_task;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+	u32 i;
+	struct rd_dev_sg_table *sg_table;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *rd_sg;
+	struct sg_mapping_iter m;
+	u32 rd_offset = req->rd_offset;
+	u32 src_len;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!table)
+		return -EINVAL;
+
+	rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
+
+	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+			dev->rd_dev_id, read_rd ? "Read" : "Write",
+			task->task_lba, req->rd_size, req->rd_page,
+			rd_offset);
+
+	src_len = PAGE_SIZE - rd_offset;
+	sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+			read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
+	while (req->rd_size) {
+		u32 len;
+		void *rd_addr;
+
+		sg_miter_next(&m);
+		len = min((u32)m.length, src_len);
+		m.consumed = len;
+
+		rd_addr = sg_virt(rd_sg) + rd_offset;
+
+		if (read_rd)
+			memcpy(m.addr, rd_addr, len);
+		else
+			memcpy(rd_addr, m.addr, len);
+
+		req->rd_size -= len;
+		if (!req->rd_size)
+			continue;
+
+		src_len -= len;
+		if (src_len) {
+			rd_offset += len;
+			continue;
+		}
+
+		/* rd page completed, next one please */
+		req->rd_page++;
+		rd_offset = 0;
+		src_len = PAGE_SIZE;
+		if (req->rd_page <= table->page_end_offset) {
+			rd_sg++;
+			continue;
+		}
+
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!table) {
+			sg_miter_stop(&m);
+			return -EINVAL;
+		}
+
+		/* since we increment, the first sg entry is correct */
+		rd_sg = table->sg_table;
+	}
+	sg_miter_stop(&m);
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct rd_request *req = RD_REQ(task);
+	u64 tmp;
+	int ret;
+
+	tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+	req->rd_offset = do_div(tmp, PAGE_SIZE);
+	req->rd_page = tmp;
+	req->rd_size = task->task_size;
+
+	ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
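+/*
+ * Worked example for the LBA -> page/offset split above (hypothetical values,
+ * PAGE_SIZE assumed to be 4096): with block_size = 512 and task_lba = 9,
+ *
+ *	tmp       = 9 * 512 = 4608
+ *	rd_offset = 4608 % 4096 = 512
+ *	rd_page   = 4608 / 4096 = 1
+ *
+ * so the memcpy starts 512 bytes into ramdisk page 1 and runs for
+ * task_size bytes.
+ */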
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+	kfree(RD_REQ(task));
+}
+
+enum {
+	Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t rd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_rd_pages:
+			match_int(args, &arg);
+			rd_dev->rd_page_count = arg;
+			pr_debug("RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
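+/*
+ * Usage sketch (assumption, not part of this file): the only recognized
+ * control token is rd_pages, e.g. with a hypothetical path and size:
+ *
+ *   echo "rd_pages=16384" > /sys/kernel/config/target/core/rd_mcp_0/ramdisk/control
+ *
+ * which, with 4 KiB pages, backs a 64 MiB ramdisk once rd_build_device_space()
+ * allocates the pages at device creation time.
+ */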
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		pr_debug("Missing rd_pages= parameter\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+	return bl;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+			dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+
+	return blocks_long;
+}
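+/*
+ * Example (hypothetical values, PAGE_SIZE assumed to be 4096): with
+ * rd_page_count = 16384 and block_size = 512 the expression above yields
+ * (16384 * 4096) / 512 - 1 = 131071, i.e. the highest addressable LBA,
+ * consistent with READ CAPACITY style last-LBA reporting.
+ */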
+
+static struct se_subsystem_api rd_mcp_template = {
+	.name			= "rd_mcp",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+};
+
+int __init rd_module_init(void)
+{
+	int ret;
+
+	ret = transport_subsystem_register(&rd_mcp_template);
+	if (ret < 0) {
+		return ret;
+	}
+
+	return 0;
+}
+
+void rd_module_exit(void)
+{
+	transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.h
new file mode 100644
index 0000000..784e56a
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_rd.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v4.0"
+#define RD_DR_VERSION		"4.0"
+#define RD_MCP_VERSION		"4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct rd_request {
+	struct se_task	rd_task;
+
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Remaining size in bytes of the request */
+	u32		rd_size;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+struct rd_dev {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of struct rd_dev_sg_table containing scatterlists */
+	struct rd_dev_sg_table *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_stat.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_stat.c
new file mode 100644
index 0000000..3d44beb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_stat.c
@@ -0,0 +1,1794 @@
+/*******************************************************************************
+ * Filename:  target_core_stat.c
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ * Copyright (c) 2011 Linux-iSCSI.org
+ *
+ * Modern ConfigFS group context specific statistics based on original
+ * target_core_mib.c code
+ *
+ * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_internal.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+#define NONE		"None"
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+#define SCSI_LU_INDEX			1
+#define LU_COUNT			1
+
+/*
+ * SCSI Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_DEV_ATTR(_name, _mode)				\
+static struct target_stat_scsi_dev_attribute				\
+			target_stat_scsi_dev_##_name =			\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_dev_show_attr_##_name,				\
+	target_stat_scsi_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_DEV_ATTR_RO(_name)				\
+static struct target_stat_scsi_dev_attribute				\
+			target_stat_scsi_dev_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_dev_show_attr_##_name);
+
+static ssize_t target_stat_scsi_dev_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_dev_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_dev_show_attr_role(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "Target\n");
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_dev_show_attr_ports(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+}
+DEV_STAT_SCSI_DEV_ATTR_RO(ports);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
+	&target_stat_scsi_dev_inst.attr,
+	&target_stat_scsi_dev_indx.attr,
+	&target_stat_scsi_dev_role.attr,
+	&target_stat_scsi_dev_ports.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
+	.show_attribute		= target_stat_scsi_dev_attr_show,
+	.store_attribute	= target_stat_scsi_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_dev_cit = {
+	.ct_item_ops		= &target_stat_scsi_dev_attrib_ops,
+	.ct_attrs		= target_stat_scsi_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
+#define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode)			\
+static struct target_stat_scsi_tgt_dev_attribute			\
+			target_stat_scsi_tgt_dev_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_tgt_dev_show_attr_##_name,			\
+	target_stat_scsi_tgt_dev_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name)				\
+static struct target_stat_scsi_tgt_dev_attribute			\
+			target_stat_scsi_tgt_dev_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_tgt_dev_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	char status[16];
+
+	if (!dev)
+		return -ENODEV;
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		strcpy(status, "activated");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		strcpy(status, "deactivated");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		strcpy(status, "shutdown");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		strcpy(status, "offline");
+		break;
+	default:
+		sprintf(status, "unknown(%d)", dev->dev_status);
+		break;
+	}
+
+	return snprintf(page, PAGE_SIZE, "%s\n", status);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int non_accessible_lus;
+
+	if (!dev)
+		return -ENODEV;
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		non_accessible_lus = 0;
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+	case TRANSPORT_DEVICE_SHUTDOWN:
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+	default:
+		non_accessible_lus = 1;
+		break;
+	}
+
+	return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
+
+static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
+
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
+	&target_stat_scsi_tgt_dev_inst.attr,
+	&target_stat_scsi_tgt_dev_indx.attr,
+	&target_stat_scsi_tgt_dev_num_lus.attr,
+	&target_stat_scsi_tgt_dev_status.attr,
+	&target_stat_scsi_tgt_dev_non_access_lus.attr,
+	&target_stat_scsi_tgt_dev_resets.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
+	.show_attribute		= target_stat_scsi_tgt_dev_attr_show,
+	.store_attribute	= target_stat_scsi_tgt_dev_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+	.ct_item_ops		= &target_stat_scsi_tgt_dev_attrib_ops,
+	.ct_attrs		= target_stat_scsi_tgt_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
+#define DEV_STAT_SCSI_LU_ATTR(_name, _mode)				\
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_lu_show_attr_##_name,				\
+	target_stat_scsi_lu_store_attr_##_name);
+
+#define DEV_STAT_SCSI_LU_ATTR_RO(_name)					\
+static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_lu_show_attr_##_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_inst(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_hba *hba = se_subdev->se_dev_hba;
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_lu_show_attr_indx(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_lu_show_attr_lun(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+	/* FIXME: scsiLuDefaultLun */
+	return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lun);
+
+static ssize_t target_stat_scsi_lu_show_attr_lu_name(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+	/* scsiLuWwnName */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+			(strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
+			dev->se_sub_dev->t10_wwn.unit_serial : "None");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
+
+static ssize_t target_stat_scsi_lu_show_attr_vend(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int i;
+	char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuVendorId */
+	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
+		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
+			dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(vend);
+
+static ssize_t target_stat_scsi_lu_show_attr_prod(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int i;
+	char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuProductId */
+	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++)
+		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
+			dev->se_sub_dev->t10_wwn.model[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(prod);
+
+static ssize_t target_stat_scsi_lu_show_attr_rev(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+	int i;
+	char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuRevisionId */
+	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
+		str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
+			dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+	str[i] = '\0';
+	return snprintf(page, PAGE_SIZE, "%s\n", str);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(rev);
+
+static ssize_t target_stat_scsi_lu_show_attr_dev_type(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuPeripheralType */
+	return snprintf(page, PAGE_SIZE, "%u\n",
+			dev->transport->get_device_type(dev));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
+
+static ssize_t target_stat_scsi_lu_show_attr_status(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuStatus */
+	return snprintf(page, PAGE_SIZE, "%s\n",
+		(dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+		"available" : "notavailable");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(status);
+
+static ssize_t target_stat_scsi_lu_show_attr_state_bit(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuState */
+	return snprintf(page, PAGE_SIZE, "exposed\n");
+}
+DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
+
+static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuNumCommands */
+	return snprintf(page, PAGE_SIZE, "%llu\n",
+			(unsigned long long)dev->num_cmds);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuReadMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuWrittenMegaBytes */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_lu_show_attr_resets(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuInResets */
+	return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(resets);
+
+static ssize_t target_stat_scsi_lu_show_attr_full_stat(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* FIXME: scsiLuOutTaskSetFullStatus */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
+
+static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* FIXME: scsiLuHSInCommands */
+	return snprintf(page, PAGE_SIZE, "%u\n", 0);
+}
+DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_lu_show_attr_creation_time(
+	struct se_dev_stat_grps *sgrps, char *page)
+{
+	struct se_subsystem_dev *se_subdev = container_of(sgrps,
+			struct se_subsystem_dev, dev_stat_grps);
+	struct se_device *dev = se_subdev->se_dev_ptr;
+
+	if (!dev)
+		return -ENODEV;
+
+	/* scsiLuCreationTime */
+	return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+}
+DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
+
+static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
+	&target_stat_scsi_lu_inst.attr,
+	&target_stat_scsi_lu_dev.attr,
+	&target_stat_scsi_lu_indx.attr,
+	&target_stat_scsi_lu_lun.attr,
+	&target_stat_scsi_lu_lu_name.attr,
+	&target_stat_scsi_lu_vend.attr,
+	&target_stat_scsi_lu_prod.attr,
+	&target_stat_scsi_lu_rev.attr,
+	&target_stat_scsi_lu_dev_type.attr,
+	&target_stat_scsi_lu_status.attr,
+	&target_stat_scsi_lu_state_bit.attr,
+	&target_stat_scsi_lu_num_cmds.attr,
+	&target_stat_scsi_lu_read_mbytes.attr,
+	&target_stat_scsi_lu_write_mbytes.attr,
+	&target_stat_scsi_lu_resets.attr,
+	&target_stat_scsi_lu_full_stat.attr,
+	&target_stat_scsi_lu_hs_num_cmds.attr,
+	&target_stat_scsi_lu_creation_time.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
+	.show_attribute		= target_stat_scsi_lu_attr_show,
+	.store_attribute	= target_stat_scsi_lu_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_lu_cit = {
+	.ct_item_ops		= &target_stat_scsi_lu_attrib_ops,
+	.ct_attrs		= target_stat_scsi_lu_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_configfs.c:target_core_make_subdev() to setup
+ * the target statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+{
+	struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+
+	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+			"scsi_dev", &target_stat_scsi_dev_cit);
+	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+			"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+	config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+			"scsi_lu", &target_stat_scsi_lu_cit);
+
+	dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
+	dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
+	dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+	dev_stat_grp->default_groups[3] = NULL;
+}
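+/*
+ * Resulting layout sketch (directory names outside this file are assumptions):
+ * the three groups initialized above appear as scsi_dev/, scsi_tgt_dev/ and
+ * scsi_lu/ under the device's statistics group in configfs, e.g.
+ *
+ *   .../core/<hba>/<dev>/statistics/scsi_lu/num_cmds
+ */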
+
+/*
+ * SCSI Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_PORT_ATTR(_name, _mode)				\
+static struct target_stat_scsi_port_attribute				\
+			target_stat_scsi_port_##_name =			\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_port_show_attr_##_name,			\
+	target_stat_scsi_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_PORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_port_attribute				\
+			target_stat_scsi_port_##_name =			\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_port_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_port_show_attr_dev(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_device *dev = lun->lun_se_dev;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_port_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_port_show_attr_role(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	ssize_t ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(role);
+
+static ssize_t target_stat_scsi_port_show_attr_busy_count(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiPortBusyStatuses  */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
+
+static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
+	&target_stat_scsi_port_inst.attr,
+	&target_stat_scsi_port_dev.attr,
+	&target_stat_scsi_port_indx.attr,
+	&target_stat_scsi_port_role.attr,
+	&target_stat_scsi_port_busy_count.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_port_attr_show,
+	.store_attribute	= target_stat_scsi_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
+#define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_tgt_port_attribute			\
+			target_stat_scsi_tgt_port_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_tgt_port_show_attr_##_name,			\
+	target_stat_scsi_tgt_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_tgt_port_attribute			\
+			target_stat_scsi_tgt_port_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_tgt_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_name(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+		tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+
+	ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+		tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+		tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
+
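+/*
+ * sep_stats.rx_data_octets/tx_data_octets are byte counters; shifting
+ * right by 20 reports them as whole mebibytes, truncating any partial MB.
+ */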
+static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			(u32)(sep->sep_stats.rx_data_octets >> 20));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			(u32)(sep->sep_stats.tx_data_octets >> 20));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	/* FIXME: scsiTgtPortHsInCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
+		scsi_tgt_port_group);
+
+static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
+	&target_stat_scsi_tgt_port_inst.attr,
+	&target_stat_scsi_tgt_port_dev.attr,
+	&target_stat_scsi_tgt_port_indx.attr,
+	&target_stat_scsi_tgt_port_name.attr,
+	&target_stat_scsi_tgt_port_port_index.attr,
+	&target_stat_scsi_tgt_port_in_cmds.attr,
+	&target_stat_scsi_tgt_port_write_mbytes.attr,
+	&target_stat_scsi_tgt_port_read_mbytes.attr,
+	&target_stat_scsi_tgt_port_hs_in_cmds.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_tgt_port_attr_show,
+	.store_attribute	= target_stat_scsi_tgt_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_tgt_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_tgt_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_tgt_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Transport Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
+#define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_transport_attribute			\
+			target_stat_scsi_transport_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_transport_show_attr_##_name,			\
+	target_stat_scsi_transport_store_attr_##_name);
+
+#define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name)				\
+static struct target_stat_scsi_transport_attribute			\
+			target_stat_scsi_transport_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_transport_show_attr_##_name);
+
+static ssize_t target_stat_scsi_transport_show_attr_inst(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_hba *hba;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+
+	hba = dev->se_hba;
+	ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_transport_show_attr_device(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	/* scsiTransportType */
+	ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+			tpg->se_tpg_tfo->get_fabric_name());
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
+
+static ssize_t target_stat_scsi_transport_show_attr_indx(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_transport_show_attr_dev_name(
+	struct se_port_stat_grps *pgrps, char *page)
+{
+	struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
+	struct se_device *dev = lun->lun_se_dev;
+	struct se_port *sep;
+	struct se_portal_group *tpg;
+	struct t10_wwn *wwn;
+	ssize_t ret;
+
+	spin_lock(&lun->lun_sep_lock);
+	sep = lun->lun_sep;
+	if (!sep) {
+		spin_unlock(&lun->lun_sep_lock);
+		return -ENODEV;
+	}
+	tpg = sep->sep_tpg;
+	wwn = &dev->se_sub_dev->t10_wwn;
+	/* scsiTransportDevName */
+	ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+			(strlen(wwn->unit_serial)) ? wwn->unit_serial :
+			wwn->vendor);
+	spin_unlock(&lun->lun_sep_lock);
+	return ret;
+}
+DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
+		scsi_transport_group);
+
+static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
+	&target_stat_scsi_transport_inst.attr,
+	&target_stat_scsi_transport_device.attr,
+	&target_stat_scsi_transport_indx.attr,
+	&target_stat_scsi_transport_dev_name.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
+	.show_attribute		= target_stat_scsi_transport_attr_show,
+	.store_attribute	= target_stat_scsi_transport_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_transport_cit = {
+	.ct_item_ops		= &target_stat_scsi_transport_attrib_ops,
+	.ct_attrs		= target_stat_scsi_transport_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_lun() to setup
+ * the target port statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_port_default_groups(struct se_lun *lun)
+{
+	struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
+
+	config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
+			"scsi_port", &target_stat_scsi_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
+			"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+	config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
+			"scsi_transport", &target_stat_scsi_transport_cit);
+
+	port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
+	port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
+	port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
+	port_stat_grp->default_groups[3] = NULL;
+}
+
+/*
+ * SCSI Authorized Initiator Table
+ */
+
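+/*
+ * The per-MappedLUN attributes below resolve their se_dev_entry through
+ * nacl->device_list[lacl->mapped_lun] under device_list_lock, and return
+ * -ENODEV once the entry no longer points at a LUN and LUN ACL (i.e. the
+ * mapping has been torn down).
+ */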
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode)			\
+static struct target_stat_scsi_auth_intr_attribute			\
+			target_stat_scsi_auth_intr_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_auth_intr_show_attr_##_name,			\
+	target_stat_scsi_auth_intr_store_attr_##_name);
+
+#define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name)				\
+static struct target_stat_scsi_auth_intr_attribute			\
+			target_stat_scsi_auth_intr_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_auth_intr_show_attr_##_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	lun = deve->se_lun;
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiAuthIntrTgtPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrDevOrPort */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrName */
+	ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrLunMapIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrAttachedTimes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrReadMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrWrittenMegaBytes */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrHSOutCommands */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAuthIntrLastCreation */
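+	/* creation_time is in jiffies; report hundredths of a second since boot */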
+	ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
+				INITIAL_JIFFIES) * 100 / HZ));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
+
+static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* FIXME: scsiAuthIntrRowStatus */
+	ret = snprintf(page, PAGE_SIZE, "Ready\n");
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
+		scsi_auth_intr_group);
+
+static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
+	&target_stat_scsi_auth_intr_inst.attr,
+	&target_stat_scsi_auth_intr_dev.attr,
+	&target_stat_scsi_auth_intr_port.attr,
+	&target_stat_scsi_auth_intr_indx.attr,
+	&target_stat_scsi_auth_intr_dev_or_port.attr,
+	&target_stat_scsi_auth_intr_intr_name.attr,
+	&target_stat_scsi_auth_intr_map_indx.attr,
+	&target_stat_scsi_auth_intr_att_count.attr,
+	&target_stat_scsi_auth_intr_num_cmds.attr,
+	&target_stat_scsi_auth_intr_read_mbytes.attr,
+	&target_stat_scsi_auth_intr_write_mbytes.attr,
+	&target_stat_scsi_auth_intr_hs_num_cmds.attr,
+	&target_stat_scsi_auth_intr_creation_time.attr,
+	&target_stat_scsi_auth_intr_row_status.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
+	.show_attribute		= target_stat_scsi_auth_intr_attr_show,
+	.store_attribute	= target_stat_scsi_auth_intr_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_auth_intr_cit = {
+	.ct_item_ops		= &target_stat_scsi_auth_intr_attrib_ops,
+	.ct_attrs		= target_stat_scsi_auth_intr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * SCSI Attached Initiator Port Table
+ */
+
+CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode)			\
+static struct target_stat_scsi_att_intr_port_attribute			\
+		target_stat_scsi_att_intr_port_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_stat_scsi_att_intr_port_show_attr_##_name,		\
+	target_stat_scsi_att_intr_port_store_attr_##_name);
+
+#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name)			\
+static struct target_stat_scsi_att_intr_port_attribute			\
+		target_stat_scsi_att_intr_port_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_stat_scsi_att_intr_port_show_attr_##_name);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiInstIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	lun = deve->se_lun;
+	/* scsiDeviceIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	tpg = nacl->se_tpg;
+	/* scsiPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortIndex */
+	ret = snprintf(page, PAGE_SIZE, "%u\n",
+			tpg->se_tpg_tfo->sess_get_index(se_sess));
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t ret;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[lacl->mapped_lun];
+	if (!deve->se_lun || !deve->se_lun_acl) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -ENODEV;
+	}
+	/* scsiAttIntrPortAuthIntrIdx */
+	ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
+	spin_unlock_irq(&nacl->device_list_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
+
+static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
+	struct se_ml_stat_grps *lgrps, char *page)
+{
+	struct se_lun_acl *lacl = container_of(lgrps,
+			struct se_lun_acl, ml_stat_grps);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_session *se_sess;
+	struct se_portal_group *tpg;
+	ssize_t ret;
+	unsigned char buf[64];
+
+	spin_lock_irq(&nacl->nacl_sess_lock);
+	se_sess = nacl->nacl_sess;
+	if (!se_sess) {
+		spin_unlock_irq(&nacl->nacl_sess_lock);
+		return -ENODEV;
+	}
+
+	tpg = nacl->se_tpg;
+	/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
+	memset(buf, 0, 64);
+	if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
+		tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
+
+	ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
+	spin_unlock_irq(&nacl->nacl_sess_lock);
+	return ret;
+}
+DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
+
+CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
+		scsi_att_intr_port_group);
+
+static struct configfs_attribute *target_stat_scsi_att_intr_port_attrs[] = {
+	&target_stat_scsi_att_intr_port_inst.attr,
+	&target_stat_scsi_att_intr_port_dev.attr,
+	&target_stat_scsi_att_intr_port_port.attr,
+	&target_stat_scsi_att_intr_port_indx.attr,
+	&target_stat_scsi_att_intr_port_port_auth_indx.attr,
+	&target_stat_scsi_att_intr_port_port_ident.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
+	.show_attribute		= target_stat_scsi_att_intr_port_attr_show,
+	.store_attribute	= target_stat_scsi_att_intr_port_attr_store,
+};
+
+static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+	.ct_item_ops		= &target_stat_scsi_att_intr_port_attrib_ops,
+	.ct_attrs		= target_stat_scsi_att_intr_port_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*
+ * Called from target_core_fabric_configfs.c:target_fabric_make_mappedlun() to setup
+ * the target MappedLUN statistics groups + configfs CITs located in target_core_stat.c
+ */
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
+{
+	struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
+
+	config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
+			"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+	config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
+			"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
+
+	ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
+	ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
+	ml_stat_grp->default_groups[2] = NULL;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_tmr.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_tmr.c
new file mode 100644
index 0000000..4a5c6d7
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_tmr.c
@@ -0,0 +1,495 @@
+/*******************************************************************************
+ * Filename:  target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/export.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
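+/*
+ * Allocate a struct se_tmr_req for an incoming task management request,
+ * flag the command as a TMR CDB, and link the request to its se_cmd.
+ * Fabric modules pass their private TMR context and the TMR function
+ * code (e.g. TMR_ABORT_TASK or TMR_LUN_RESET).
+ */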
+int core_tmr_alloc_req(
+	struct se_cmd *se_cmd,
+	void *fabric_tmr_ptr,
+	u8 function,
+	gfp_t gfp_flags)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
+	if (!tmr) {
+		pr_err("Unable to allocate struct se_tmr_req\n");
+		return -ENOMEM;
+	}
+
+	se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
+	se_cmd->se_tmr_req = tmr;
+	tmr->task_cmd = se_cmd;
+	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+	tmr->function = function;
+	INIT_LIST_HEAD(&tmr->tmr_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
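+/*
+ * Release a TMR request, first unlinking it from the device's TMR list
+ * under se_tmr_lock if it was ever attached to a device.
+ */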
+void core_tmr_release_req(
+	struct se_tmr_req *tmr)
+{
+	struct se_device *dev = tmr->tmr_dev;
+	unsigned long flags;
+
+	if (!dev) {
+		kfree(tmr);
+		return;
+	}
+
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+	list_del(&tmr->tmr_list);
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+	kfree(tmr);
+}
+
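+/*
+ * Finish off an aborted command.  With no remaining frontend references
+ * (fe_count == 0) the descriptor is completed and released immediately;
+ * otherwise TASK ABORTED status may be sent back to the initiator first,
+ * depending on the TAS setting and the nexus that issued the TMR.
+ */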
+static void core_tmr_handle_tas_abort(
+	struct se_node_acl *tmr_nacl,
+	struct se_cmd *cmd,
+	int tas,
+	int fe_count)
+{
+	if (!fe_count) {
+		transport_cmd_finish_abort(cmd, 1);
+		return;
+	}
+	/*
+	 * TASK ABORTED status (TAS) bit support
+	 */
+	if ((tmr_nacl &&
+	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+		transport_send_task_abort(cmd);
+
+	transport_cmd_finish_abort(cmd, 0);
+}
+
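+/*
+ * For PREEMPT_AND_ABORT processing: return 0 when no abort list was
+ * supplied or when the command's reservation key matches an entry in it,
+ * and 1 when the command should be skipped because its key does not match.
+ */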
+static int target_check_cdb_and_preempt(struct list_head *list,
+		struct se_cmd *cmd)
+{
+	struct t10_pr_registration *reg;
+
+	if (!list)
+		return 0;
+	list_for_each_entry(reg, list, pr_reg_abort_list) {
+		if (reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
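+/*
+ * Handle an ABORT_TASK TMR: locate the referenced task tag on the session
+ * command list, mark it CMD_T_ABORTED, wait for it to quiesce and report
+ * TMR_FUNCTION_COMPLETE; otherwise (no match, or the command has already
+ * completed) report TMR_TASK_DOES_NOT_EXIST.
+ */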
+void core_tmr_abort_task(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct se_session *se_sess)
+{
+	struct se_cmd *se_cmd, *tmp_cmd;
+	unsigned long flags;
+	u32 ref_tag;
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	list_for_each_entry_safe(se_cmd, tmp_cmd,
+			&se_sess->sess_cmd_list, se_cmd_list) {
+
+		if (dev != se_cmd->se_dev)
+			continue;
+		ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
+		if (tmr->ref_task_tag != ref_tag)
+			continue;
+
+		printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+			se_cmd->se_tfo->get_fabric_name(), ref_tag);
+
+		spin_lock(&se_cmd->t_state_lock);
+		if (se_cmd->transport_state & CMD_T_COMPLETE) {
+			printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+			spin_unlock(&se_cmd->t_state_lock);
+			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+			goto out;
+		}
+		se_cmd->transport_state |= CMD_T_ABORTED;
+		spin_unlock(&se_cmd->t_state_lock);
+
+		list_del_init(&se_cmd->se_cmd_list);
+		kref_get(&se_cmd->cmd_kref);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+		cancel_work_sync(&se_cmd->work);
+		transport_wait_for_tasks(se_cmd);
+		/*
+		 * Now send SAM_STAT_TASK_ABORTED status for the referenced
+		 * se_cmd descriptor..
+		 */
+		transport_send_task_abort(se_cmd);
+		/*
+		 * Also deal with possible extra acknowledge reference..
+		 */
+		if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
+			target_put_sess_cmd(se_sess, se_cmd);
+
+		target_put_sess_cmd(se_sess, se_cmd);
+
+		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+				" ref_tag: %u\n", ref_tag);
+		tmr->response = TMR_FUNCTION_COMPLETE;
+		return;
+	}
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+out:
+	printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %u\n",
+			tmr->ref_task_tag);
+	tmr->response = TMR_TASK_DOES_NOT_EXIST;
+}
+
+static void core_tmr_drain_tmr_list(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_tmr_list);
+	struct se_tmr_req *tmr_p, *tmr_pp;
+	struct se_cmd *cmd;
+	unsigned long flags;
+	/*
+	 * Release all pending and outgoing TMRs aside from the received
+	 * LUN_RESET tmr..
+	 */
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		/*
+		 * Allow the received TMR to return with FUNCTION_COMPLETE.
+		 */
+		if (tmr_p == tmr)
+			continue;
+
+		cmd = tmr_p->task_cmd;
+		if (!cmd) {
+			pr_err("Unable to locate struct se_cmd for TMR\n");
+			continue;
+		}
+		/*
+		 * If this function was called with a valid pr_res_key
+		 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service action),
+		 * skip TMRs whose commands do not match a registration key in it.
+		 */
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+			continue;
+
+		spin_lock(&cmd->t_state_lock);
+		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+			spin_unlock(&cmd->t_state_lock);
+			continue;
+		}
+		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+			spin_unlock(&cmd->t_state_lock);
+			continue;
+		}
+		spin_unlock(&cmd->t_state_lock);
+
+		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+	}
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
+		list_del_init(&tmr_p->tmr_list);
+		cmd = tmr_p->task_cmd;
+
+		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+			" Response: 0x%02x, t_state: %d\n",
+			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+			tmr_p->function, tmr_p->response, cmd->t_state);
+
+		transport_cmd_finish_abort(cmd, 1);
+	}
+}
+
+static void core_tmr_drain_task_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_task_list);
+	struct se_cmd *cmd;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count;
+	/*
+	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
+	 * for TMR LUN_RESET:
+	 *
+	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
+	 * other than the one that caused the SCSI device condition is
+	 * completed with TASK ABORTED status, if the TAS bit is set to one in
+	 * the Control mode page (see SPC-4). "No" indicates that no status is
+	 * returned for aborted commands.
+	 *
+	 * d) If the logical unit reset is caused by a particular I_T nexus
+	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+	 * (TASK_ABORTED status) applies.
+	 *
+	 * Otherwise (e.g., if triggered by a hard reset), "no"
+	 * (no TASK_ABORTED SAM status) applies.
+	 *
+	 * Note that this seems to be independent of TAS (Task Aborted Status)
+	 * in the Control Mode Page.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+				t_state_list) {
+		if (!task->task_se_cmd) {
+			pr_err("task->task_se_cmd is NULL!\n");
+			continue;
+		}
+		cmd = task->task_se_cmd;
+
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		list_move_tail(&task->t_state_list, &drain_task_list);
+		task->t_state_active = false;
+		/*
+		 * Remove from task execute list before processing drain_task_list
+		 */
+		if (!list_empty(&task->t_execute_list))
+			__transport_remove_task_from_execute_queue(task, dev);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	while (!list_empty(&drain_task_list)) {
+		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+		list_del(&task->t_state_list);
+		cmd = task->task_se_cmd;
+
+		pr_debug("LUN_RESET: %s cmd: %p task: %p"
+			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+			" cdb: 0x%02x\n",
+			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+			cmd->se_tfo->get_task_tag(cmd), 0,
+			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+			cmd->t_task_cdb[0]);
+		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+			" t_task_cdbs: %d t_task_cdbs_left: %d"
+			" t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
+			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
+			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+			cmd->t_task_list_num,
+			atomic_read(&cmd->t_task_cdbs_left),
+			atomic_read(&cmd->t_task_cdbs_sent),
+			(cmd->transport_state & CMD_T_ACTIVE) != 0,
+			(cmd->transport_state & CMD_T_STOP) != 0,
+			(cmd->transport_state & CMD_T_SENT) != 0);
+
+		/*
+		 * If the command may be queued onto a workqueue cancel it now.
+		 *
+		 * This is equivalent to removal from the execute queue in the
+		 * loop above, but we do it down here given that
+		 * cancel_work_sync may block.
+		 */
+		if (cmd->t_state == TRANSPORT_COMPLETE)
+			cancel_work_sync(&cmd->work);
+
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		target_stop_task(task, &flags);
+
+		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&cmd->t_task_cdbs_ex_left));
+			continue;
+		}
+		fe_count = atomic_read(&cmd->t_fe_count);
+
+		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+			pr_debug("LUN_RESET: got !CMD_T_ACTIVE for"
+				" task: %p, t_fe_count: %d dev: %p\n", task,
+				fe_count, dev);
+			cmd->transport_state |= CMD_T_ABORTED;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+			continue;
+		}
+		pr_debug("LUN_RESET: Got CMD_T_ACTIVE for task: %p,"
+			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+		cmd->transport_state |= CMD_T_ABORTED;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+	}
+}
+
+static void core_tmr_drain_cmd_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_cmd_list);
+	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	struct se_cmd *cmd, *tcmd;
+	unsigned long flags;
+	/*
+	 * Release all commands remaining in the struct se_device cmd queue.
+	 *
+	 * This follows the same logic as above for the struct se_device
+	 * struct se_task state list, where commands are returned with
+	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+	 * reference, otherwise the struct se_cmd is released.
+	 */
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		cmd->transport_state &= ~CMD_T_QUEUED;
+		atomic_dec(&qobj->queue_cnt);
+		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	while (!list_empty(&drain_cmd_list)) {
+		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
+		list_del_init(&cmd->se_queue_node);
+
+		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+			"Preempt" : "", cmd, cmd->t_state,
+			atomic_read(&cmd->t_fe_count));
+
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+				atomic_read(&cmd->t_fe_count));
+	}
+}
+
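+/*
+ * Entry point for LUN_RESET and PROUT PREEMPT_AND_ABORT processing:
+ * drain pending TMRs, in-flight tasks and queued commands for the device
+ * (optionally restricted to the reservation keys in
+ * preempt_and_abort_list), drop any legacy SPC-2 reservation, and bump
+ * the device reset counter.
+ */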
+int core_tmr_lun_reset(
+        struct se_device *dev,
+        struct se_tmr_req *tmr,
+        struct list_head *preempt_and_abort_list,
+        struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	int tas;
+        /*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	/*
+	 * Clear any legacy SPC-2 reservation when called during
+	 * LOGICAL UNIT RESET
+	 */
+	if (!preempt_and_abort_list &&
+	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+		spin_lock(&dev->dev_reservation_lock);
+		dev->dev_reserved_node_acl = NULL;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		spin_unlock(&dev->dev_reservation_lock);
+		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
+	}
+
+	spin_lock_irq(&dev->stats_lock);
+	dev->num_resets++;
+	spin_unlock_irq(&dev->stats_lock);
+
+	pr_debug("LUN_RESET: %s for [%s] Complete\n",
+			(preempt_and_abort_list) ? "Preempt" : "TMR",
+			dev->transport->name);
+	return 0;
+}
+
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_tpg.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_tpg.c
new file mode 100644
index 0000000..0e17fa3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_tpg.c
@@ -0,0 +1,865 @@
+/*******************************************************************************
+ * Filename:  target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/in.h>
+#include <linux/export.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+
+extern struct se_device *g_lun0_dev;
+
+static DEFINE_SPINLOCK(tpg_lock);
+static LIST_HEAD(tpg_list);
+
+/*	core_clear_initiator_node_from_tpg():
+ *
+ *	Revoke this initiator's access to every mapped LUN in the TPG by
+ *	resetting each active se_dev_entry back to TRANSPORT_LUNFLAGS_NO_ACCESS.
+ */
+static void core_clear_initiator_node_from_tpg(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	int i;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			pr_err("%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				tpg->se_tpg_tfo->get_fabric_name());
+			continue;
+		}
+
+		lun = deve->se_lun;
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+		spin_lock_irq(&nacl->device_list_lock);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/*	__core_tpg_get_initiator_node_acl():
+ *
+ *	tpg->acl_node_lock must be held when calling
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!strcmp(acl->initiatorname, initiatorname))
+			return acl;
+	}
+
+	return NULL;
+}
+
+/*	core_tpg_get_initiator_node_acl():
+ *
+ *	Locked lookup wrapper around __core_tpg_get_initiator_node_acl().
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	return acl;
+}
+
+/*	core_tpg_add_node_to_devs():
+ *
+ *	Create demo-mode MappedLUNs for every active LUN in the TPG, using
+ *	READ-ONLY or READ-WRITE access based on demo_mode_write_protect.
+ */
+void core_tpg_add_node_to_devs(
+	struct se_node_acl *acl,
+	struct se_portal_group *tpg)
+{
+	int i = 0;
+	u32 lun_access = 0;
+	struct se_lun *lun;
+	struct se_device *dev;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = tpg->tpg_lun_list[i];
+		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+
+		dev = lun->lun_se_dev;
+		/*
+		 * By default in LIO-Target $FABRIC_MOD,
+		 * demo_mode_write_protect is ON, or READ_ONLY;
+		 */
+		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
+			if (dev->dev_flags & DF_READ_ONLY)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			/*
+			 * Allow only optical drives to issue R/W in default RO
+			 * demo mode.
+			 */
+			if (dev->transport->get_device_type(dev) == TYPE_DISK)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+
+		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+			" access for LUN in Demo Mode\n",
+			tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+				lun_access, acl, tpg, 1);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/*      core_set_queue_depth_for_node():
+ *
+ *      Guard against a zero queue depth on the node ACL by falling back to 1.
+ */
+static int core_set_queue_depth_for_node(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl)
+{
+	if (!acl->queue_depth) {
+		pr_err("Queue depth for %s Initiator Node: %s is 0,"
+			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
+			acl->initiatorname);
+		acl->queue_depth = 1;
+	}
+
+	return 0;
+}
+
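+/*
+ * Helpers for the per-ACL device_list: an array of n individually
+ * kzalloc'd elements, so a partially failed allocation can be unwound.
+ */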
+void array_free(void *array, int n)
+{
+	void **a = array;
+	int i;
+
+	for (i = 0; i < n; i++)
+		kfree(a[i]);
+	kfree(a);
+}
+
+static void *array_zalloc(int n, size_t size, gfp_t flags)
+{
+	void **a;
+	int i;
+
+	a = kzalloc(n * sizeof(void*), flags);
+	if (!a)
+		return NULL;
+	for (i = 0; i < n; i++) {
+		a[i] = kzalloc(size, flags);
+		if (!a[i]) {
+			array_free(a, n);
+			return NULL;
+		}
+	}
+	return a;
+}
+
+/*      core_create_device_list_for_node():
+ *
+ *      Allocate and initialize the node ACL's per-LUN array of
+ *      struct se_dev_entry, one slot per possible TPG LUN.
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+	int i;
+
+	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+			sizeof(struct se_dev_entry), GFP_KERNEL);
+	if (!nacl->device_list) {
+		pr_err("Unable to allocate memory for"
+			" struct se_node_acl->device_list\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = nacl->device_list[i];
+
+		atomic_set(&deve->ua_count, 0);
+		atomic_set(&deve->pr_ref_count, 0);
+		spin_lock_init(&deve->ua_lock);
+		INIT_LIST_HEAD(&deve->alua_port_list);
+		INIT_LIST_HEAD(&deve->ua_list);
+	}
+
+	return 0;
+}
+
+/*	core_tpg_check_initiator_node_acl()
+ *
+ *	Look up an explicit ACL for the initiator name, or create a dynamic
+ *	demo-mode ACL on the fly when the fabric permits it.
+ */
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (acl)
+		return acl;
+
+	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
+		return NULL;
+
+	acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+	if (!acl)
+		return NULL;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	kref_init(&acl->acl_kref);
+	init_completion(&acl->acl_free_comp);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+	acl->dynamic_node_acl = 1;
+
+	tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+	/*
+	 * Here we only create demo-mode MappedLUNs from the active
+	 * TPG LUNs if the fabric is not explicitly asking for
+	 * tpg_check_demo_mode_login_only() == 1.
+	 */
+	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
+		core_tpg_add_node_to_devs(acl, tpg);
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
+
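+/*
+ * Spin until all outstanding persistent reservation references on the
+ * node ACL have been dropped, so the ACL can be safely released.
+ */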
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+		cpu_relax();
+}
+
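+/*
+ * Remove every active LUN from the TPG, dropping tpg_lun_lock around
+ * each core_dev_del_lun() call.
+ */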
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+	int i;
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = tpg->tpg_lun_list[i];
+
+		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+		    (lun->lun_se_dev == NULL))
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+		core_dev_del_lun(tpg, lun->unpacked_lun);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/*	core_tpg_add_initiator_node_acl():
+ *
+ *	Add an explicit initiator node ACL to this TPG, taking over any
+ *	existing dynamically generated ACL for the same initiator.
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *se_nacl,
+	const char *initiatorname,
+	u32 queue_depth)
+{
+	struct se_node_acl *acl = NULL;
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (acl) {
+		if (acl->dynamic_node_acl) {
+			acl->dynamic_node_acl = 0;
+			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
+				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
+			spin_unlock_irq(&tpg->acl_node_lock);
+			/*
+			 * Release the locally allocated struct se_node_acl
+			 * because core_tpg_add_initiator_node_acl() returned
+			 * a pointer to an existing demo mode node ACL.
+			 */
+			if (se_nacl)
+				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
+							se_nacl);
+			goto done;
+		}
+
+		pr_err("ACL entry for %s Initiator"
+			" Node %s already exists for TPG %u, ignoring"
+			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
+			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock_irq(&tpg->acl_node_lock);
+		return ERR_PTR(-EEXIST);
+	}
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	if (!se_nacl) {
+		pr_err("struct se_node_acl pointer is NULL\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * For v4.x logic the struct se_node_acl is hanging off a
+	 * fabric-dependent structure allocated via
+	 * struct target_core_fabric_ops->fabric_make_nodeacl()
+	 */
+	acl = se_nacl;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	kref_init(&acl->acl_kref);
+	init_completion(&acl->acl_free_comp);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = queue_depth;
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+
+	tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+done:
+	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
+
+/*	core_tpg_del_initiator_node_acl():
+ *
+ *	Remove an initiator node ACL from this TPG, shutting down any
+ *	sessions still associated with it.
+ */
+int core_tpg_del_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	int force)
+{
+	LIST_HEAD(sess_list);
+	struct se_session *sess, *sess_tmp;
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	if (acl->dynamic_node_acl)
+		acl->dynamic_node_acl = 0;
+	list_del(&acl->acl_list);
+	tpg->num_node_acls--;
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	acl->acl_stop = 1;
+
+	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+				sess_acl_list) {
+		if (sess->sess_tearing_down != 0)
+			continue;
+
+		target_get_session(sess);
+		list_move(&sess->sess_acl_list, &sess_list);
+	}
+	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
+		list_del(&sess->sess_acl_list);
+
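+		/*
+		 * Drop the reference taken by target_get_session() above.  If
+		 * ->shutdown_session() reports that this context performed the
+		 * session shutdown, drop the extra session reference as well
+		 * so ->close_session() is invoked once the last reference is
+		 * released.
+		 */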
+		rc = tpg->se_tpg_tfo->shutdown_session(sess);
+		target_put_session(sess);
+		if (!rc)
+			continue;
+		target_put_session(sess);
+	}
+	target_put_nacl(acl);
+	/*
+	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
+	 * for active fabric session transport_deregister_session() callbacks.
+	 */
+	wait_for_completion(&acl->acl_free_comp);
+
+	core_tpg_wait_for_nacl_pr_ref(acl);
+	core_clear_initiator_node_from_tpg(acl, tpg);
+	core_free_device_list_for_node(acl, tpg);
+
+	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/*	core_tpg_set_initiator_node_queue_depth():
+ *
+ *	Change the queue depth for an initiator node ACL, optionally forcing
+ *	session reinstatement when an active session exists.
+ */
+int core_tpg_set_initiator_node_queue_depth(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname,
+	u32 queue_depth,
+	int force)
+{
+	struct se_session *sess, *init_sess = NULL;
+	struct se_node_acl *acl;
+	unsigned long flags;
+	int dynamic_acl = 0;
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!acl) {
+		pr_err("Access Control List entry for %s Initiator"
+			" Node %s does not exist for TPG %hu, ignoring"
+			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock_irq(&tpg->acl_node_lock);
+		return -ENODEV;
+	}
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	spin_lock_irqsave(&tpg->session_lock, flags);
+	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+
+		if (!force) {
+			pr_err("Unable to change queue depth for %s"
+				" Initiator Node: %s while session is"
+				" operational.  To forcefully change the queue"
+				" depth and force session reinstatement"
+				" use the \"force=1\" parameter.\n",
+				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+			spin_unlock_irqrestore(&tpg->session_lock, flags);
+
+			spin_lock_irq(&tpg->acl_node_lock);
+			if (dynamic_acl)
+				acl->dynamic_node_acl = 1;
+			spin_unlock_irq(&tpg->acl_node_lock);
+			return -EEXIST;
+		}
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!tpg->se_tpg_tfo->shutdown_session(sess))
+			continue;
+
+		init_sess = sess;
+		break;
+	}
+
+	/*
+	 * User has requested to change the queue depth for an Initiator Node.
+	 * Change the value in the Node's struct se_node_acl, and call
+	 * core_set_queue_depth_for_node() to add the requested queue depth.
+	 *
+	 * Finally call tpg->se_tpg_tfo->close_session() to force session
+	 * reinstatement to occur if there is an active session for the
+	 * $FABRIC_MOD Initiator Node in question.
+	 */
+	acl->queue_depth = queue_depth;
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		spin_unlock_irqrestore(&tpg->session_lock, flags);
+		/*
+		 * Force session reinstatement if
+		 * core_set_queue_depth_for_node() failed, because we assume
+		 * the $FABRIC_MOD has already set the session reinstatement
+		 * bit from the tpg->se_tpg_tfo->shutdown_session() call above.
+		 */
+		if (init_sess)
+			tpg->se_tpg_tfo->close_session(init_sess);
+
+		spin_lock_irq(&tpg->acl_node_lock);
+		if (dynamic_acl)
+			acl->dynamic_node_acl = 1;
+		spin_unlock_irq(&tpg->acl_node_lock);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&tpg->session_lock, flags);
+	/*
+	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+	 * forcefully shutdown the $FABRIC_MOD session/nexus.
+	 */
+	if (init_sess)
+		tpg->se_tpg_tfo->close_session(init_sess);
+
+	pr_debug("Successfully changed queue depth to: %d for Initiator"
+		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
+		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+		tpg->se_tpg_tfo->tpg_get_tag(tpg));
+
+	spin_lock_irq(&tpg->acl_node_lock);
+	if (dynamic_acl)
+		acl->dynamic_node_acl = 1;
+	spin_unlock_irq(&tpg->acl_node_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
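+/*
+ * Export the global virtual LUN 0 device read-only at unpacked LUN 0 for a
+ * newly registered normal TPG.
+ */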
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	/* Set in core_dev_setup_virtual_lun0() */
+	struct se_device *dev = g_lun0_dev;
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	int ret;
+
+	lun->unpacked_lun = 0;
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&lun->lun_acl_list);
+	INIT_LIST_HEAD(&lun->lun_cmd_list);
+	spin_lock_init(&lun->lun_acl_lock);
+	spin_lock_init(&lun->lun_cmd_lock);
+	spin_lock_init(&lun->lun_sep_lock);
+
+	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+	core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+	struct target_core_fabric_ops *tfo,
+	struct se_wwn *se_wwn,
+	struct se_portal_group *se_tpg,
+	void *tpg_fabric_ptr,
+	int se_tpg_type)
+{
+	struct se_lun *lun;
+	u32 i;
+
+	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
+			sizeof(struct se_lun), GFP_KERNEL);
+	if (!se_tpg->tpg_lun_list) {
+		pr_err("Unable to allocate struct se_portal_group->"
+				"tpg_lun_list\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = se_tpg->tpg_lun_list[i];
+		lun->unpacked_lun = i;
+		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+		atomic_set(&lun->lun_acl_count, 0);
+		init_completion(&lun->lun_shutdown_comp);
+		INIT_LIST_HEAD(&lun->lun_acl_list);
+		INIT_LIST_HEAD(&lun->lun_cmd_list);
+		spin_lock_init(&lun->lun_acl_lock);
+		spin_lock_init(&lun->lun_cmd_lock);
+		spin_lock_init(&lun->lun_sep_lock);
+	}
+
+	se_tpg->se_tpg_type = se_tpg_type;
+	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+	se_tpg->se_tpg_tfo = tfo;
+	se_tpg->se_tpg_wwn = se_wwn;
+	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+	INIT_LIST_HEAD(&se_tpg->acl_node_list);
+	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
+	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+	spin_lock_init(&se_tpg->acl_node_lock);
+	spin_lock_init(&se_tpg->session_lock);
+	spin_lock_init(&se_tpg->tpg_lun_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+			/*
+			 * se_tpg is owned by the calling fabric module, so
+			 * only release the tpg_lun_list allocated above here.
+			 */
+			array_free(se_tpg->tpg_lun_list,
+					TRANSPORT_MAX_LUNS_PER_TPG);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_bh(&tpg_lock);
+	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
+	spin_unlock_bh(&tpg_lock);
+
+	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+	struct se_node_acl *nacl, *nacl_tmp;
+
+	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+		" for endpoint: %s Portal Tag %u\n",
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
+		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+
+	spin_lock_bh(&tpg_lock);
+	list_del(&se_tpg->se_tpg_node);
+	spin_unlock_bh(&tpg_lock);
+
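+	/*
+	 * Busy-wait for any outstanding persistent reservation references
+	 * against this TPG to be dropped before tearing it down.
+	 */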
+	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+		cpu_relax();
+	/*
+	 * Release any remaining demo-mode generated se_node_acls that have
+	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+	 * in transport_deregister_session().
+	 */
+	spin_lock_irq(&se_tpg->acl_node_lock);
+	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+			acl_list) {
+		list_del(&nacl->acl_list);
+		se_tpg->num_node_acls--;
+		spin_unlock_irq(&se_tpg->acl_node_lock);
+
+		core_tpg_wait_for_nacl_pr_ref(nacl);
+		core_free_device_list_for_node(nacl, se_tpg);
+		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
+
+		spin_lock_irq(&se_tpg->acl_node_lock);
+	}
+	spin_unlock_irq(&se_tpg->acl_node_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+		core_tpg_release_virtual_lun0(se_tpg);
+
+	se_tpg->se_tpg_fabric_ptr = NULL;
+	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			tpg->se_tpg_tfo->get_fabric_name(),
+			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+		pr_err("TPG Logical Unit Number: %u is already active"
+			" on %s Target Portal Group: %u, ignoring request.\n",
+			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_addlun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 lun_access,
+	void *lun_ptr)
+{
+	int ret;
+
+	ret = core_dev_export(lun_ptr, tpg, lun);
+	if (ret < 0)
+		return ret;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_access = lun_access;
+	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
+
+static void core_tpg_shutdown_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_clear_lun_from_tpg(lun, tpg);
+	transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		pr_err("%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %u, ignoring request.\n",
+			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+			tpg->se_tpg_tfo->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-ENODEV);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_dellun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_tpg_shutdown_lun(tpg, lun);
+
+	core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_transport.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_transport.c
new file mode 100644
index 0000000..b4b308e
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_transport.c
@@ -0,0 +1,4889 @@
+/*******************************************************************************
+ * Filename:  target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static int sub_api_initialized;
+
+static struct workqueue_struct *target_completion_wq;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *param);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+		struct se_device *dev);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static int transport_generic_get_mem(struct se_cmd *cmd);
+static void transport_put_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void target_complete_ok_work(struct work_struct *work);
+
+int init_se_kmem_caches(void)
+{
+	se_sess_cache = kmem_cache_create("se_sess_cache",
+			sizeof(struct se_session), __alignof__(struct se_session),
+			0, NULL);
+	if (!se_sess_cache) {
+		pr_err("kmem_cache_create() for struct se_session"
+				" failed\n");
+		goto out;
+	}
+	se_ua_cache = kmem_cache_create("se_ua_cache",
+			sizeof(struct se_ua), __alignof__(struct se_ua),
+			0, NULL);
+	if (!se_ua_cache) {
+		pr_err("kmem_cache_create() for struct se_ua failed\n");
+		goto out_free_sess_cache;
+	}
+	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+			sizeof(struct t10_pr_registration),
+			__alignof__(struct t10_pr_registration), 0, NULL);
+	if (!t10_pr_reg_cache) {
+		pr_err("kmem_cache_create() for struct t10_pr_registration"
+				" failed\n");
+		goto out_free_ua_cache;
+	}
+	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+			0, NULL);
+	if (!t10_alua_lu_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
+				" failed\n");
+		goto out_free_pr_reg_cache;
+	}
+	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+			sizeof(struct t10_alua_lu_gp_member),
+			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+	if (!t10_alua_lu_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
+				"cache failed\n");
+		goto out_free_lu_gp_cache;
+	}
+	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+			sizeof(struct t10_alua_tg_pt_gp),
+			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+	if (!t10_alua_tg_pt_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"cache failed\n");
+		goto out_free_lu_gp_mem_cache;
+	}
+	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+			"t10_alua_tg_pt_gp_mem_cache",
+			sizeof(struct t10_alua_tg_pt_gp_member),
+			__alignof__(struct t10_alua_tg_pt_gp_member),
+			0, NULL);
+	if (!t10_alua_tg_pt_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"mem_t failed\n");
+		goto out_free_tg_pt_gp_cache;
+	}
+
+	target_completion_wq = alloc_workqueue("target_completion",
+					       WQ_MEM_RECLAIM, 0);
+	if (!target_completion_wq)
+		goto out_free_tg_pt_gp_mem_cache;
+
+	return 0;
+
+out_free_tg_pt_gp_mem_cache:
+	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_tg_pt_gp_cache:
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+out_free_lu_gp_mem_cache:
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+out_free_lu_gp_cache:
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+out_free_pr_reg_cache:
+	kmem_cache_destroy(t10_pr_reg_cache);
+out_free_ua_cache:
+	kmem_cache_destroy(se_ua_cache);
+out_free_sess_cache:
+	kmem_cache_destroy(se_sess_cache);
+out:
+	return -ENOMEM;
+}
+
+void release_se_kmem_caches(void)
+{
+	destroy_workqueue(target_completion_wq);
+	kmem_cache_destroy(se_sess_cache);
+	kmem_cache_destroy(se_ua_cache);
+	kmem_cache_destroy(t10_pr_reg_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+}
+
+/* This code ensures unique mib indexes are handed out. */
+static DEFINE_SPINLOCK(scsi_mib_index_lock);
+static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
+
+	spin_lock(&scsi_mib_index_lock);
+	new_index = ++scsi_mib_index[type];
+	spin_unlock(&scsi_mib_index_lock);
+
+	return new_index;
+}
+
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+	atomic_set(&qobj->queue_cnt, 0);
+	INIT_LIST_HEAD(&qobj->qobj_list);
+	init_waitqueue_head(&qobj->thread_wq);
+	spin_lock_init(&qobj->cmd_queue_lock);
+}
+
+void transport_subsystem_check_init(void)
+{
+	int ret;
+
+	if (sub_api_initialized)
+		return;
+
+	ret = request_module("target_core_iblock");
+	if (ret != 0)
+		pr_err("Unable to load target_core_iblock\n");
+
+	ret = request_module("target_core_file");
+	if (ret != 0)
+		pr_err("Unable to load target_core_file\n");
+
+	ret = request_module("target_core_pscsi");
+	if (ret != 0)
+		pr_err("Unable to load target_core_pscsi\n");
+
+	ret = request_module("target_core_stgt");
+	if (ret != 0)
+		pr_err("Unable to load target_core_stgt\n");
+
+	sub_api_initialized = 1;
+	return;
+}
+
+struct se_session *transport_init_session(void)
+{
+	struct se_session *se_sess;
+
+	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+	if (!se_sess) {
+		pr_err("Unable to allocate struct se_session from"
+				" se_sess_cache\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&se_sess->sess_list);
+	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
+	INIT_LIST_HEAD(&se_sess->sess_wait_list);
+	spin_lock_init(&se_sess->sess_cmd_lock);
+	kref_init(&se_sess->sess_kref);
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
+ */
+void __transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned char buf[PR_REG_ISID_LEN];
+
+	se_sess->se_tpg = se_tpg;
+	se_sess->fabric_sess_ptr = fabric_sess_ptr;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate an active
+	 * struct se_session.
+	 *
+	 * Only set for struct se_session's that will actually be moving I/O,
+	 * e.g. *NOT* discovery sessions.
+	 */
+	if (se_nacl) {
+		/*
+		 * If the fabric module supports an ISID based TransportID,
+		 * save this value in binary from the fabric I_T Nexus now.
+		 */
+		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+			memset(&buf[0], 0, PR_REG_ISID_LEN);
+			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
+					&buf[0], PR_REG_ISID_LEN);
+			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+		}
+		kref_get(&se_nacl->acl_kref);
+
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		/*
+		 * The se_nacl->nacl_sess pointer will be set to the
+		 * last active I_T Nexus for each struct se_node_acl.
+		 */
+		se_nacl->nacl_sess = se_sess;
+
+		list_add_tail(&se_sess->sess_acl_list,
+			      &se_nacl->acl_sess_list);
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&se_tpg->session_lock, flags);
+	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+}
+EXPORT_SYMBOL(transport_register_session);
+
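+/*
+ * kref release callback invoked when the final session reference is dropped;
+ * calls the fabric ->close_session() callback for the session.
+ */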
+static void target_release_session(struct kref *kref)
+{
+	struct se_session *se_sess = container_of(kref,
+			struct se_session, sess_kref);
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+
+	se_tpg->se_tpg_tfo->close_session(se_sess);
+}
+
+void target_get_session(struct se_session *se_sess)
+{
+	kref_get(&se_sess->sess_kref);
+}
+EXPORT_SYMBOL(target_get_session);
+
+int target_put_session(struct se_session *se_sess)
+{
+	return kref_put(&se_sess->sess_kref, target_release_session);
+}
+EXPORT_SYMBOL(target_put_session);
+
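+/*
+ * kref release callback for a node ACL; completes ->acl_free_comp so a
+ * waiter blocked in wait_for_completion() can continue ACL release.
+ */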
+static void target_complete_nacl(struct kref *kref)
+{
+	struct se_node_acl *nacl = container_of(kref,
+				struct se_node_acl, acl_kref);
+
+	complete(&nacl->acl_free_comp);
+}
+
+void target_put_nacl(struct se_node_acl *nacl)
+{
+	kref_put(&nacl->acl_kref, target_complete_nacl);
+}
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl;
+	unsigned long flags;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if (se_nacl) {
+		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+		if (se_nacl->acl_stop == 0)
+			list_del(&se_sess->sess_acl_list);
+		/*
+		 * If the session list is empty, then clear the pointer.
+		 * Otherwise, set the struct se_session pointer from the tail
+		 * element of the per struct se_node_acl active session list.
+		 */
+		if (list_empty(&se_nacl->acl_sess_list))
+			se_nacl->nacl_sess = NULL;
+		else {
+			se_nacl->nacl_sess = container_of(
+					se_nacl->acl_sess_list.prev,
+					struct se_session, sess_acl_list);
+		}
+		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+	}
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+	kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	struct target_core_fabric_ops *se_tfo;
+	struct se_node_acl *se_nacl;
+	unsigned long flags;
+	bool comp_nacl = true;
+
+	if (!se_tpg) {
+		transport_free_session(se_sess);
+		return;
+	}
+	se_tfo = se_tpg->se_tpg_tfo;
+
+	spin_lock_irqsave(&se_tpg->session_lock, flags);
+	list_del(&se_sess->sess_list);
+	se_sess->se_tpg = NULL;
+	se_sess->fabric_sess_ptr = NULL;
+	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+
+	/*
+	 * Determine if we need to do extra work for this initiator node's
+	 * struct se_node_acl if it had been previously dynamically generated.
+	 */
+	se_nacl = se_sess->se_node_acl;
+
+	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+	if (se_nacl && se_nacl->dynamic_node_acl) {
+		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+			list_del(&se_nacl->acl_list);
+			se_tpg->num_node_acls--;
+			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+			core_tpg_wait_for_nacl_pr_ref(se_nacl);
+			core_free_device_list_for_node(se_nacl, se_tpg);
+			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
+
+			comp_nacl = false;
+			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+
+	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+		se_tpg->se_tpg_tfo->get_fabric_name());
+	/*
+	 * If the last kref is dropping now for an explicit NodeACL, wake the
+	 * sleeping ->acl_free_comp waiter so the configfs se_node_acl->acl_group
+	 * removal context can proceed.
+	 */
+	if (se_nacl && comp_nacl)
+		target_put_nacl(se_nacl);
+
+	transport_free_session(se_sess);
+}
+EXPORT_SYMBOL(transport_deregister_session);
+
+/*
+ * Called with cmd->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	if (!dev)
+		return;
+
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
+		if (task->task_flags & TF_ACTIVE)
+			continue;
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		if (task->t_state_active) {
+			pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+				cmd->se_tfo->get_task_tag(cmd), dev, task);
+
+			list_del(&task->t_state_list);
+			atomic_dec(&cmd->t_task_cdbs_ex_left);
+			task->t_state_active = false;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	}
+}
+
+/*	transport_cmd_check_stop():
+ *
+ *	'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
+ *	'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ *	A non-zero u8 t_state sets cmd->t_state.
+ *	Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+	struct se_cmd *cmd,
+	int transport_off,
+	u8 t_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	/*
+	 * Determine if an IOCTL context caller is requesting the stopping of this
+	 * command for LUN shutdown purposes.
+	 */
+	if (cmd->transport_state & CMD_T_LUN_STOP) {
+		pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+			__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+		cmd->transport_state &= ~CMD_T_ACTIVE;
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		complete(&cmd->transport_lun_stop_comp);
+		return 1;
+	}
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (cmd->transport_state & CMD_T_STOP) {
+		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+			__func__, __LINE__,
+			cmd->se_tfo->get_task_tag(cmd));
+
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+
+		/*
+		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+		 * to FE.
+		 */
+		if (transport_off == 2)
+			cmd->se_lun = NULL;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		complete(&cmd->t_transport_stop_comp);
+		return 1;
+	}
+	if (transport_off) {
+		cmd->transport_state &= ~CMD_T_ACTIVE;
+		if (transport_off == 2) {
+			transport_all_task_dev_remove_state(cmd);
+			/*
+			 * Clear struct se_cmd->se_lun before the transport_off == 2
+			 * handoff to fabric module.
+			 */
+			cmd->se_lun = NULL;
+			/*
+			 * Some fabric modules like tcm_loop can release
+			 * their internally allocated I/O reference and the
+			 * struct se_cmd now.
+			 *
+			 * Fabric modules are expected to return '1' here if the
+			 * se_cmd being passed is released at this point,
+			 * or zero if not being released.
+			 */
+			if (cmd->se_tfo->check_stop_free != NULL) {
+				spin_unlock_irqrestore(
+					&cmd->t_state_lock, flags);
+
+				return cmd->se_tfo->check_stop_free(cmd);
+			}
+		}
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		return 0;
+	} else if (t_state)
+		cmd->t_state = t_state;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+	return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+	struct se_lun *lun = cmd->se_lun;
+	unsigned long flags;
+
+	if (!lun)
+		return;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+		transport_all_task_dev_remove_state(cmd);
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+	if (!list_empty(&cmd->se_lun_node))
+		list_del_init(&cmd->se_lun_node);
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+		transport_lun_remove_cmd(cmd);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+	if (remove) {
+		transport_remove_cmd_from_queue(cmd);
+		transport_put_cmd(cmd);
+	}
+}
+
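+/*
+ * Queue a command on the device processing queue, optionally at the head,
+ * and wake up the device processing thread.
+ */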
+static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
+		bool at_head)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	unsigned long flags;
+
+	if (t_state) {
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		cmd->t_state = t_state;
+		cmd->transport_state |= CMD_T_ACTIVE;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	}
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+
+	/* If the cmd is already on the list, remove it before we add it */
+	if (!list_empty(&cmd->se_queue_node))
+		list_del(&cmd->se_queue_node);
+	else
+		atomic_inc(&qobj->queue_cnt);
+
+	if (at_head)
+		list_add(&cmd->se_queue_node, &qobj->qobj_list);
+	else
+		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+	cmd->transport_state |= CMD_T_QUEUED;
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	wake_up_interruptible(&qobj->thread_wq);
+}
+
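+/*
+ * Remove and return the next command from the device processing queue, or
+ * NULL if the queue is empty.
+ */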
+static struct se_cmd *
+transport_get_cmd_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (list_empty(&qobj->qobj_list)) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return NULL;
+	}
+	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
+
+	cmd->transport_state &= ~CMD_T_QUEUED;
+	list_del_init(&cmd->se_queue_node);
+	atomic_dec(&qobj->queue_cnt);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	return cmd;
+}
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
+{
+	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (!(cmd->transport_state & CMD_T_QUEUED)) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return;
+	}
+	cmd->transport_state &= ~CMD_T_QUEUED;
+	atomic_dec(&qobj->queue_cnt);
+	list_del_init(&cmd->se_queue_node);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+	struct se_task *task = list_entry(cmd->t_task_list.next,
+				struct se_task, t_list);
+
+	if (good) {
+		cmd->scsi_status = SAM_STAT_GOOD;
+		task->task_scsi_status = GOOD;
+	} else {
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_se_cmd->scsi_sense_reason =
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	}
+
+	transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
+
+static void target_complete_failure_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+	transport_generic_request_failure(cmd);
+}
+
+/*	transport_complete_task():
+ *
+ *	Called from interrupt and non interrupt context depending
+ *	on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	task->task_flags &= ~TF_ACTIVE;
+
+	/*
+	 * See if any sense data exists, if so set the TASK_SENSE flag.
+	 * Also check for any other post completion work that needs to be
+	 * done by the plugins.
+	 */
+	if (dev && dev->transport->transport_complete) {
+		if (dev->transport->transport_complete(task) != 0) {
+			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+			task->task_flags |= TF_HAS_SENSE;
+			success = 1;
+		}
+	}
+
+	/*
+	 * See if we are waiting for outstanding struct se_task
+	 * to complete for an exception condition
+	 */
+	if (task->task_flags & TF_REQUEST_STOP) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		complete(&task->task_stop_comp);
+		return;
+	}
+
+	if (!success)
+		cmd->transport_state |= CMD_T_FAILED;
+
+	/*
+	 * Decrement the outstanding t_task_cdbs_left count.  The last
+	 * struct se_task from struct se_cmd will complete itself into the
+	 * device queue depending upon int success.
+	 */
+	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	/*
+	 * Check for the case where an explicit ABORT_TASK has been received
+	 * and transport_wait_for_tasks() will be waiting for completion.
+	 */
+	if (cmd->transport_state & CMD_T_ABORTED &&
+	    cmd->transport_state & CMD_T_STOP) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		complete(&cmd->t_transport_stop_comp);
+		return;
+	} else if (cmd->transport_state & CMD_T_FAILED) {
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		INIT_WORK(&cmd->work, target_complete_failure_work);
+	} else {
+		INIT_WORK(&cmd->work, target_complete_ok_work);
+	}
+
+	cmd->t_state = TRANSPORT_COMPLETE;
+	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	queue_work(target_completion_wq, &cmd->work);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * of the struct se_device.
+ *
+ * Called with struct se_device->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	/*
+	 * No SAM Task attribute emulation enabled, add to tail of
+	 * execution queue
+	 */
+	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+		return 0;
+	}
+	/*
+	 * HEAD_OF_QUEUE attribute for the received CDB, which means
+	 * the first task that is associated with a struct se_cmd goes to
+	 * the head of the struct se_device->execute_task_list, and each
+	 * subsequent task is inserted after task_prev.
+	 */
+	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
+		list_add(&task->t_execute_list,
+				(task_prev != NULL) ?
+				&task_prev->t_execute_list :
+				&dev->execute_task_list);
+
+		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+				" in execution queue\n",
+				task->task_se_cmd->t_task_cdb[0]);
+		return 1;
+	}
+	/*
+	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end
+	 * of the struct se_device->execute_task_list once they have
+	 * transitioned from Dormant -> Active state.
+	 */
+	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+	return 0;
+}
+
+/*	__transport_add_task_to_execute_queue():
+ *
+ *	Called with struct se_device->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	int head_of_queue;
+
+	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+	atomic_inc(&dev->execute_tasks);
+
+	if (task->t_state_active)
+		return;
+	/*
+	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
+	 * state list as well.  Running without SAM Task Attribute emulation
+	 * will always return head_of_queue == 0 here.
+	 */
+	if (head_of_queue)
+		list_add(&task->t_state_list, (task_prev) ?
+				&task_prev->t_state_list :
+				&dev->state_task_list);
+	else
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+	task->t_state_active = true;
+
+	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
+		task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
+		spin_lock(&dev->execute_task_lock);
+		if (!task->t_state_active) {
+			list_add_tail(&task->t_state_list,
+				      &dev->state_task_list);
+			task->t_state_active = true;
+
+			pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+				task->task_se_cmd->se_tfo->get_task_tag(
+				task->task_se_cmd), task, dev);
+		}
+		spin_unlock(&dev->execute_task_lock);
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_task *task, *task_prev = NULL;
+
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
+		if (!list_empty(&task->t_execute_list))
+			continue;
+		/*
+		 * __transport_add_task_to_execute_queue() handles the
+		 * SAM Task Attribute emulation if enabled
+		 */
+		__transport_add_task_to_execute_queue(task, task_prev, dev);
+		task_prev = task;
+	}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	struct se_device *dev = cmd->se_dev;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_add_tasks_from_cmd(cmd);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+void __transport_remove_task_from_execute_queue(struct se_task *task,
+		struct se_device *dev)
+{
+	list_del_init(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+}
+
+static void transport_remove_task_from_execute_queue(
+	struct se_task *task,
+	struct se_device *dev)
+{
+	unsigned long flags;
+
+	if (WARN_ON(list_empty(&task->t_execute_list)))
+		return;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	__transport_remove_task_from_execute_queue(task, dev);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+/*
+ * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
+ */
+
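+/*
+ * Workqueue handler that drains the per-device QUEUE_FULL command list and
+ * requeues each command at the head of the device processing queue for retry.
+ */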
+static void target_qf_do_work(struct work_struct *work)
+{
+	struct se_device *dev = container_of(work, struct se_device,
+					qf_work_queue);
+	LIST_HEAD(qf_cmd_list);
+	struct se_cmd *cmd, *cmd_tmp;
+
+	spin_lock_irq(&dev->qf_cmd_lock);
+	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+	spin_unlock_irq(&dev->qf_cmd_lock);
+
+	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
+		list_del(&cmd->se_qf_node);
+		atomic_dec(&dev->dev_qf_count);
+		smp_mb__after_atomic_dec();
+
+		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
+			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
+			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+			: "UNKNOWN");
+
+		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
+	}
+}
+
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+	switch (cmd->data_direction) {
+	case DMA_NONE:
+		return "NONE";
+	case DMA_FROM_DEVICE:
+		return "READ";
+	case DMA_TO_DEVICE:
+		return "WRITE";
+	case DMA_BIDIRECTIONAL:
+		return "BIDI";
+	default:
+		break;
+	}
+
+	return "UNKNOWN";
+}
+
+void transport_dump_dev_state(
+	struct se_device *dev,
+	char *b,
+	int *bl)
+{
+	*bl += sprintf(b + *bl, "Status: ");
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		*bl += sprintf(b + *bl, "ACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "DEACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		*bl += sprintf(b + *bl, "SHUTDOWN");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "OFFLINE");
+		break;
+	default:
+		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+		break;
+	}
+
+	*bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+		atomic_read(&dev->execute_tasks), dev->queue_depth);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
+		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+	*bl += sprintf(b + *bl, "        ");
+}
+
+void transport_dump_vpd_proto_id(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+	switch (vpd->protocol_identifier) {
+	case 0x00:
+		sprintf(buf+len, "Fibre Channel\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "Parallel SCSI\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SSA\n");
+		break;
+	case 0x30:
+		sprintf(buf+len, "IEEE 1394\n");
+		break;
+	case 0x40:
+		sprintf(buf+len, "SCSI Remote Direct Memory Access"
+				" Protocol\n");
+		break;
+	case 0x50:
+		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+		break;
+	case 0x60:
+		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+		break;
+	case 0x70:
+		sprintf(buf+len, "Automation/Drive Interface Transport"
+				" Protocol\n");
+		break;
+	case 0x80:
+		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n",
+				vpd->protocol_identifier);
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * Check if the Protocol Identifier Valid (PIV) bit is set..
+	 *
+	 * from spc3r23.pdf section 7.5.1
+	 */
+	 if (page_83[1] & 0x80) {
+		vpd->protocol_identifier = (page_83[0] & 0xf0);
+		vpd->protocol_identifier_set = 1;
+		transport_dump_vpd_proto_id(vpd, NULL, 0);
+	}
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
+int transport_dump_vpd_assoc(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+	switch (vpd->association) {
+	case 0x00:
+		sprintf(buf+len, "addressed logical unit\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "target port\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SCSI target device\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identification association..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 297
+	 */
+	vpd->association = (page_83[1] & 0x30);
+	return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
+int transport_dump_vpd_ident_type(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+	switch (vpd->device_identifier_type) {
+	case 0x00:
+		sprintf(buf+len, "Vendor specific\n");
+		break;
+	case 0x01:
+		sprintf(buf+len, "T10 Vendor ID based\n");
+		break;
+	case 0x02:
+		sprintf(buf+len, "EUI-64 based\n");
+		break;
+	case 0x03:
+		sprintf(buf+len, "NAA\n");
+		break;
+	case 0x04:
+		sprintf(buf+len, "Relative target port identifier\n");
+		break;
+	case 0x08:
+		sprintf(buf+len, "SCSI name string\n");
+		break;
+	default:
+		sprintf(buf+len, "Unsupported: 0x%02x\n",
+				vpd->device_identifier_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf) {
+		if (p_buf_len < strlen(buf)+1)
+			return -EINVAL;
+		strncpy(p_buf, buf, p_buf_len);
+	} else {
+		pr_debug("%s", buf);
+	}
+
+	return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identifier type..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 298
+	 */
+	vpd->device_identifier_type = (page_83[1] & 0x0f);
+	return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
+int transport_dump_vpd_ident(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x02: /* ASCII */
+		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x03: /* UTF-8 */
+		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	default:
+		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+			" 0x%02x", vpd->device_identifier_code_set);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		pr_debug("%s", buf);
+
+	return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	static const char hex_str[] = "0123456789abcdef";
+	int j = 0, i = 4; /* offset to start of the identifier */
+
+	/*
+	 * The VPD Code Set (encoding)
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 296
+	 */
+	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		vpd->device_identifier[j++] =
+				hex_str[vpd->device_identifier_type];
+		while (i < (4 + page_83[3])) {
+			vpd->device_identifier[j++] =
+				hex_str[(page_83[i] & 0xf0) >> 4];
+			vpd->device_identifier[j++] =
+				hex_str[page_83[i] & 0x0f];
+			i++;
+		}
+		break;
+	case 0x02: /* ASCII */
+	case 0x03: /* UTF-8 */
+		while (i < (4 + page_83[3]))
+			vpd->device_identifier[j++] = page_83[i++];
+		break;
+	default:
+		break;
+	}
+
+	return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
+
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, disable the
+	 * SAM Task Attribute emulation.
+	 *
+	 * This is currently not available in upstream Linux/SCSI Target
+	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
+	 */
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+		return;
+	}
+
+	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+		" device\n", dev->transport->name,
+		dev->transport->get_device_rev(dev));
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+	char buf[17];
+	int i, device_type;
+	/*
+	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+	 */
+	for (i = 0; i < 8; i++)
+		if (wwn->vendor[i] >= 0x20)
+			buf[i] = wwn->vendor[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Vendor: %s\n", buf);
+
+	for (i = 0; i < 16; i++)
+		if (wwn->model[i] >= 0x20)
+			buf[i] = wwn->model[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Model: %s\n", buf);
+
+	for (i = 0; i < 4; i++)
+		if (wwn->revision[i] >= 0x20)
+			buf[i] = wwn->revision[i];
+		else
+			buf[i] = ' ';
+	buf[i] = '\0';
+	pr_debug("  Revision: %s\n", buf);
+
+	device_type = dev->transport->get_device_type(dev);
+	pr_debug("  Type:   %s ", scsi_device_type(device_type));
+	pr_debug("                 ANSI SCSI revision: %02x\n",
+				dev->transport->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+	struct se_hba *hba,
+	struct se_subsystem_api *transport,
+	struct se_subsystem_dev *se_dev,
+	u32 device_flags,
+	void *transport_dev,
+	struct se_dev_limits *dev_limits,
+	const char *inquiry_prod,
+	const char *inquiry_rev)
+{
+	int force_pt;
+	struct se_device  *dev;
+
+	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+	if (!dev) {
+		pr_err("Unable to allocate memory for se_dev_t\n");
+		return NULL;
+	}
+
+	transport_init_queue_obj(&dev->dev_queue_obj);
+	dev->dev_flags		= device_flags;
+	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
+	dev->dev_ptr		= transport_dev;
+	dev->se_hba		= hba;
+	dev->se_sub_dev		= se_dev;
+	dev->transport		= transport;
+	dev->dev_link_magic	= SE_DEV_LINK_MAGIC;
+	INIT_LIST_HEAD(&dev->dev_list);
+	INIT_LIST_HEAD(&dev->dev_sep_list);
+	INIT_LIST_HEAD(&dev->dev_tmr_list);
+	INIT_LIST_HEAD(&dev->execute_task_list);
+	INIT_LIST_HEAD(&dev->delayed_cmd_list);
+	INIT_LIST_HEAD(&dev->state_task_list);
+	INIT_LIST_HEAD(&dev->qf_cmd_list);
+	spin_lock_init(&dev->execute_task_lock);
+	spin_lock_init(&dev->delayed_cmd_lock);
+	spin_lock_init(&dev->dev_reservation_lock);
+	spin_lock_init(&dev->dev_status_lock);
+	spin_lock_init(&dev->se_port_lock);
+	spin_lock_init(&dev->se_tmr_lock);
+	spin_lock_init(&dev->qf_cmd_lock);
+	atomic_set(&dev->dev_ordered_id, 0);
+
+	se_dev_set_default_attribs(dev, dev_limits);
+
+	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+	dev->creation_time = get_jiffies_64();
+	spin_lock_init(&dev->stats_lock);
+
+	spin_lock(&hba->device_lock);
+	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+	hba->dev_count++;
+	spin_unlock(&hba->device_lock);
+	/*
+	 * Setup the SAM Task Attribute emulation for struct se_device
+	 */
+	core_setup_task_attr_emulation(dev);
+	/*
+	 * Force PR and ALUA passthrough emulation with internal object use.
+	 */
+	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+	/*
+	 * Setup the Reservations infrastructure for struct se_device
+	 */
+	core_setup_reservations(dev, force_pt);
+	/*
+	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
+	 */
+	if (core_setup_alua(dev, force_pt) < 0)
+		goto out;
+
+	/*
+	 * Startup the struct se_device processing thread
+	 */
+	dev->process_thread = kthread_run(transport_processing_thread, dev,
+					  "LIO_%s", dev->transport->name);
+	if (IS_ERR(dev->process_thread)) {
+		pr_err("Unable to create kthread: LIO_%s\n",
+			dev->transport->name);
+		goto out;
+	}
+	/*
+	 * Setup work_queue for QUEUE_FULL
+	 */
+	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+	/*
+	 * Preload the initial INQUIRY const values if we are doing
+	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+	 * passthrough because this is being provided by the backend LLD.
+	 * This is required so that transport_get_inquiry() copies these
+	 * originals once back into DEV_T10_WWN(dev) for the virtual device
+	 * setup.
+	 */
+	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (!inquiry_prod || !inquiry_rev) {
+			pr_err("All non TCM/pSCSI plugins require"
+				" INQUIRY consts\n");
+			goto out;
+		}
+
+		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
+		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
+	}
+	scsi_dump_inquiry(dev);
+
+	return dev;
+out:
+	/*
+	 * The processing thread may not have been started, or may be an
+	 * ERR_PTR, when this error path is reached early.
+	 */
+	if (!IS_ERR_OR_NULL(dev->process_thread))
+		kthread_stop(dev->process_thread);
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/*	transport_generic_prepare_cdb():
+ *
+ *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ *	Because we map iSCSI LUNs to SCSI Target IDs, a non-zero LUN in the
+ *	CDB would throw the devices and HBAs for a loop, so it is cleared
+ *	here for most opcodes.
+ */
+static inline void transport_generic_prepare_cdb(
+	unsigned char *cdb)
+{
+	switch (cdb[0]) {
+	case READ_10: /* SBC - RDProtect */
+	case READ_12: /* SBC - RDProtect */
+	case READ_16: /* SBC - RDProtect */
+	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+	case VERIFY: /* SBC - VRProtect */
+	case VERIFY_16: /* SBC - VRProtect */
+	case WRITE_VERIFY: /* SBC - VRProtect */
+	case WRITE_VERIFY_12: /* SBC - VRProtect */
+	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+		break;
+	default:
+		cdb[1] &= 0x1f; /* clear logical unit number */
+		break;
+	}
+}
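+
+/*
+ * Worked example for transport_generic_prepare_cdb() above (illustrative
+ * values only): an initiator that encodes LUN 2 in byte 1 of a READ_6 CDB
+ * sends cdb[1] == 0x45; masking with 0x1f yields 0x05, so only the
+ * SBC/SPC-defined low bits survive before the CDB reaches the backend.
+ */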
+
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+		enum dma_data_direction data_direction)
+{
+	struct se_task *task;
+	struct se_device *dev = cmd->se_dev;
+
+	task = dev->transport->alloc_task(cmd->t_task_cdb);
+	if (!task) {
+		pr_err("Unable to allocate struct se_task\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&task->t_list);
+	INIT_LIST_HEAD(&task->t_execute_list);
+	INIT_LIST_HEAD(&task->t_state_list);
+	init_completion(&task->task_stop_comp);
+	task->task_se_cmd = cmd;
+	task->task_data_direction = data_direction;
+
+	return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+	struct se_cmd *cmd,
+	struct target_core_fabric_ops *tfo,
+	struct se_session *se_sess,
+	u32 data_length,
+	int data_direction,
+	int task_attr,
+	unsigned char *sense_buffer)
+{
+	INIT_LIST_HEAD(&cmd->se_lun_node);
+	INIT_LIST_HEAD(&cmd->se_delayed_node);
+	INIT_LIST_HEAD(&cmd->se_qf_node);
+	INIT_LIST_HEAD(&cmd->se_queue_node);
+	INIT_LIST_HEAD(&cmd->se_cmd_list);
+	INIT_LIST_HEAD(&cmd->t_task_list);
+	init_completion(&cmd->transport_lun_fe_stop_comp);
+	init_completion(&cmd->transport_lun_stop_comp);
+	init_completion(&cmd->t_transport_stop_comp);
+	init_completion(&cmd->cmd_wait_comp);
+	spin_lock_init(&cmd->t_state_lock);
+	cmd->transport_state = CMD_T_DEV_ACTIVE;
+
+	cmd->se_tfo = tfo;
+	cmd->se_sess = se_sess;
+	cmd->data_length = data_length;
+	cmd->data_direction = data_direction;
+	cmd->sam_task_attr = task_attr;
+	cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+	/*
+	 * Check if SAM Task Attribute emulation is enabled for this
+	 * struct se_device storage object
+	 */
+	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 0;
+
+	if (cmd->sam_task_attr == MSG_ACA_TAG) {
+		pr_debug("SAM Task Attribute ACA"
+			" emulation is not supported\n");
+		return -EINVAL;
+	}
+	/*
+	 * Used to determine when ORDERED commands should go from
+	 * Dormant to Active status.
+	 */
+	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
+	smp_mb__after_atomic_inc();
+	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+			cmd->se_ordered_id, cmd->sam_task_attr,
+			cmd->se_dev->transport->name);
+	return 0;
+}
+
+/*	transport_generic_allocate_tasks():
+ *
+ *	Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	int ret;
+
+	transport_generic_prepare_cdb(cdb);
+	/*
+	 * Ensure that the received CDB does not exceed the max (252 + 8) bytes
+	 * for VARIABLE_LENGTH_CMD
+	 */
+	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+		pr_err("Received SCSI CDB with command_size: %d that"
+			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+	/*
+	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+	 * allocate the additional extended CDB buffer now.  Otherwise
+	 * setup the pointer from __t_task_cdb to t_task_cdb.
+	 */
+	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
+						GFP_KERNEL);
+		if (!cmd->t_task_cdb) {
+			pr_err("Unable to allocate cmd->t_task_cdb"
+				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
+				scsi_command_size(cdb),
+				(unsigned long)sizeof(cmd->__t_task_cdb));
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return -ENOMEM;
+		}
+	} else
+		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+	/*
+	 * Copy the original CDB into cmd->
+	 */
+	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
+	/*
+	 * Setup the received CDB based on SCSI defined opcodes and
+	 * perform unit attention, persistent reservations and ALUA
+	 * checks for virtual device backends.  The cmd->t_task_cdb
+	 * pointer is expected to be setup before we reach this point.
+	 */
+	ret = transport_generic_cmd_sequencer(cmd, cdb);
+	if (ret < 0)
+		return ret;
+	/*
+	 * Check for SAM Task Attribute Emulation
+	 */
+	if (transport_check_alloc_task_attr(cmd) < 0) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -EINVAL;
+	}
+	spin_lock(&cmd->se_lun->lun_sep_lock);
+	if (cmd->se_lun->lun_sep)
+		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+	spin_unlock(&cmd->se_lun->lun_sep_lock);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
+
+/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+	struct se_cmd *cmd)
+{
+	int ret;
+
+	if (!cmd->se_lun) {
+		dump_stack();
+		pr_err("cmd->se_lun is NULL\n");
+		return -EINVAL;
+	}
+	if (in_interrupt()) {
+		dump_stack();
+		pr_err("transport_generic_handle_cdb cannot be called"
+				" from interrupt context\n");
+		return -EINVAL;
+	}
+	/*
+	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
+	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
+	 * in existing usage to ensure that outstanding descriptors are handled
+	 * correctly during shutdown via transport_wait_for_tasks()
+	 *
+	 * Also, we don't take cmd->t_state_lock here as we only expect
+	 * this to be called for initial descriptor submission.
+	 */
+	cmd->t_state = TRANSPORT_NEW_CMD;
+	cmd->transport_state |= CMD_T_ACTIVE;
+
+	/*
+	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
+	 * so follow TRANSPORT_NEW_CMD processing thread context usage
+	 * and call transport_generic_request_failure() if necessary..
+	 */
+	ret = transport_generic_new_cmd(cmd);
+	if (ret < 0)
+		transport_generic_request_failure(cmd);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
+
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+		u32 data_length, int task_attr, int data_dir, int flags)
+{
+	struct se_portal_group *se_tpg;
+	int rc;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+	BUG_ON(in_interrupt());
+	/*
+	 * Initialize se_cmd for target operation.  From this point
+	 * exceptions are handled by sending exception status via
+	 * target_core_fabric_ops->queue_status() callback
+	 */
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+				data_length, data_dir, task_attr, sense);
+	/*
+	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
+	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+	 * kref_put() to happen during fabric packet acknowledgement.
+	 */
+	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+	/*
+	 * Signal bidirectional data payloads to target-core
+	 */
+	if (flags & TARGET_SCF_BIDI_OP)
+		se_cmd->se_cmd_flags |= SCF_BIDI;
+	/*
+	 * Locate se_lun pointer and attach it to struct se_cmd
+	 */
+	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
+		transport_send_check_condition_and_sense(se_cmd,
+				se_cmd->scsi_sense_reason, 0);
+		target_put_sess_cmd(se_sess, se_cmd);
+		return;
+	}
+	/*
+	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
+	 * allocate the necessary tasks to complete the received CDB+data
+	 */
+	rc = transport_generic_allocate_tasks(se_cmd, cdb);
+	if (rc != 0) {
+		transport_generic_request_failure(se_cmd);
+		return;
+	}
+	/*
+	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+	 * for immediate execution of READs, otherwise wait for
+	 * transport_generic_handle_data() to be called for WRITEs
+	 * when fabric has filled the incoming buffer.
+	 */
+	transport_handle_cdb_direct(se_cmd);
+	return;
+}
+EXPORT_SYMBOL(target_submit_cmd);
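+
+/*
+ * Illustrative call into target_submit_cmd() above from a fabric driver's
+ * RX path (identifiers such as 'io', 'sess' and 'lun' are hypothetical and
+ * not part of this file):
+ *
+ *	target_submit_cmd(&io->se_cmd, sess, io->cdb, io->sense_buf,
+ *			  lun, io->data_len, MSG_SIMPLE_TAG,
+ *			  DMA_FROM_DEVICE, 0);
+ *
+ * From that point on, any exception status is delivered through the
+ * fabric's ->queue_status() callback rather than through a return code.
+ */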
+
+static void target_complete_tmr_failure(struct work_struct *work)
+{
+	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
+
+	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+	transport_cmd_check_stop_to_fabric(se_cmd);
+}
+
+/**
+ * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
+ *                     for TMR CDBs
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @fabric_tmr_ptr: fabric context for TMR req
+ * @tm_type: Type of TM request
+ * @gfp: gfp type for caller
+ * @tag: referenced task tag for TMR_ABORT_TASK
+ * @flags: submit cmd flags
+ *
+ * Callable from all contexts.
+ **/
+
+int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+		unsigned char *sense, u32 unpacked_lun,
+		void *fabric_tmr_ptr, unsigned char tm_type,
+		gfp_t gfp, unsigned int tag, int flags)
+{
+	struct se_portal_group *se_tpg;
+	int ret;
+
+	se_tpg = se_sess->se_tpg;
+	BUG_ON(!se_tpg);
+
+	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+			      0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+	/*
+	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+	 * allocation failure.
+	 */
+	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
+	if (ret < 0)
+		return -ENOMEM;
+
+	if (tm_type == TMR_ABORT_TASK)
+		se_cmd->se_tmr_req->ref_task_tag = tag;
+
+	/* See target_submit_cmd for commentary */
+	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+
+	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+	if (ret) {
+		/*
+		 * For callback during failure handling, push this work off
+		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
+		 */
+		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+		schedule_work(&se_cmd->work);
+		return 0;
+	}
+	transport_generic_handle_tmr(se_cmd);
+	return 0;
+}
+EXPORT_SYMBOL(target_submit_tmr);
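+
+/*
+ * Illustrative use of target_submit_tmr() above for an ABORT TASK request
+ * (identifiers are hypothetical): pass TMR_ABORT_TASK as tm_type together
+ * with the referenced task tag, e.g.
+ *
+ *	target_submit_tmr(&io->se_cmd, sess, io->sense_buf, lun,
+ *			  fabric_tmr, TMR_ABORT_TASK, GFP_KERNEL,
+ *			  ref_tag, 0);
+ *
+ * A missing LUN is reported asynchronously via the scheduled
+ * target_complete_tmr_failure() work with TMR_LUN_DOES_NOT_EXIST status.
+ */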
+
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly set up se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+	struct se_cmd *cmd)
+{
+	if (!cmd->se_lun) {
+		dump_stack();
+		pr_err("cmd->se_lun is NULL\n");
+		return -EINVAL;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/*	transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+	struct se_cmd *cmd)
+{
+	/*
+	 * For the software fabric case, we assume the nexus is being
+	 * failed/shut down when signals are pending from the kthread context
+	 * caller, so we return a failure.  For the HW target mode case running
+	 * in interrupt context, the signal_pending() check is skipped.
+	 */
+	if (!in_interrupt() && signal_pending(current))
+		return -EPERM;
+	/*
+	 * If the received CDB has already been ABORTED by the generic
+	 * target engine, we now call transport_check_aborted_status()
+	 * to queue any delayed TASK_ABORTED status for the received CDB to the
+	 * fabric module as we are expecting no further incoming DATA OUT
+	 * sequences at this point.
+	 */
+	if (transport_check_aborted_status(cmd, 1) != 0)
+		return 0;
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/*	transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
+{
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+/*
+ * If the task is active, request it to be stopped and sleep until it
+ * has completed.
+ */
+bool target_stop_task(struct se_task *task, unsigned long *flags)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	bool was_active = false;
+
+	if (task->task_flags & TF_ACTIVE) {
+		task->task_flags |= TF_REQUEST_STOP;
+		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+		pr_debug("Task %p waiting to complete\n", task);
+		wait_for_completion(&task->task_stop_comp);
+		pr_debug("Task %p stopped successfully\n", task);
+
+		spin_lock_irqsave(&cmd->t_state_lock, *flags);
+		atomic_dec(&cmd->t_task_cdbs_left);
+		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
+		was_active = true;
+	}
+
+	return was_active;
+}
+
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("ITT[0x%08x] - Stopping tasks\n",
+		cmd->se_tfo->get_task_tag(cmd));
+
+	/*
+	 * No tasks remain in the execution queue
+	 */
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&cmd->t_task_list, t_list) {
+		pr_debug("Processing task %p\n", task);
+		/*
+		 * If the struct se_task has not been sent and is not active,
+		 * remove the struct se_task from the execution queue.
+		 */
+		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
+			spin_unlock_irqrestore(&cmd->t_state_lock,
+					flags);
+			transport_remove_task_from_execute_queue(task,
+					cmd->se_dev);
+
+			pr_debug("Task %p removed from execute queue\n", task);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			continue;
+		}
+
+		if (!target_stop_task(task, &flags)) {
+			pr_debug("Task %p - did nothing\n", task);
+			ret++;
+		}
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return ret;
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+void transport_generic_request_failure(struct se_cmd *cmd)
+{
+	int ret = 0;
+
+	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+		cmd->t_task_cdb[0]);
+	pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+		cmd->se_tfo->get_cmd_state(cmd),
+		cmd->t_state, cmd->scsi_sense_reason);
+	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
+		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+		" CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
+		cmd->t_task_list_num,
+		atomic_read(&cmd->t_task_cdbs_left),
+		atomic_read(&cmd->t_task_cdbs_sent),
+		atomic_read(&cmd->t_task_cdbs_ex_left),
+		(cmd->transport_state & CMD_T_ACTIVE) != 0,
+		(cmd->transport_state & CMD_T_STOP) != 0,
+		(cmd->transport_state & CMD_T_SENT) != 0);
+
+	/*
+	 * For SAM Task Attribute emulation for failed struct se_cmd
+	 */
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	switch (cmd->scsi_sense_reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_INVALID_CDB_FIELD:
+	case TCM_INVALID_PARAMETER_LIST:
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	case TCM_UNKNOWN_MODE_PAGE:
+	case TCM_WRITE_PROTECTED:
+	case TCM_ADDRESS_OUT_OF_RANGE:
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+	case TCM_CHECK_CONDITION_NOT_READY:
+		break;
+	case TCM_RESERVATION_CONFLICT:
+		/*
+		 * No SENSE Data payload for this case, set SCSI Status
+		 * and queue the response to $FABRIC_MOD.
+		 *
+		 * Uses linux/include/scsi/scsi.h SAM status codes defs
+		 */
+		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+		/*
+		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+		 * CONFLICT STATUS.
+		 *
+		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+		 */
+		if (cmd->se_sess &&
+		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
+				cmd->orig_fe_lun, 0x2C,
+				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		goto check_stop;
+	default:
+		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
+			cmd->t_task_cdb[0], cmd->scsi_sense_reason);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	}
+	/*
+	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
+	 * make the call to transport_send_check_condition_and_sense()
+	 * directly.  Otherwise expect the fabric to make the call to
+	 * transport_send_check_condition_and_sense() after handling
+	 * possible unsolicited write data payloads.
+	 */
+	ret = transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
+
+check_stop:
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+	return;
+
+queue_full:
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+}
+EXPORT_SYMBOL(transport_generic_request_failure);
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
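+
+/*
+ * The four helpers above simply reassemble big-endian LBA fields from the
+ * CDB.  Worked example for transport_lba_21(): a READ_6 CDB beginning
+ * 08 01 23 45 yields ((0x01 & 0x1f) << 16) | (0x23 << 8) | 0x45,
+ * i.e. LBA 0x012345.
+ */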
+
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+	if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 1;
+	/*
+	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+	 * to allow the tasks of the passed struct se_cmd to be added to the
+	 * front of the execution list.
+	 */
+	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+		pr_debug("Added HEAD_OF_QUEUE for CDB:"
+			" 0x%02x, se_ordered_id: %u\n",
+			cmd->t_task_cdb[0],
+			cmd->se_ordered_id);
+		return 1;
+	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+		atomic_inc(&cmd->se_dev->dev_ordered_sync);
+		smp_mb__after_atomic_inc();
+
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
+				" list, se_ordered_id: %u\n",
+				cmd->t_task_cdb[0],
+				cmd->se_ordered_id);
+		/*
+		 * Add ORDERED command to tail of execution queue if
+		 * no other older commands exist that need to be
+		 * completed first.
+		 */
+		if (!atomic_read(&cmd->se_dev->simple_cmds))
+			return 1;
+	} else {
+		/*
+		 * For SIMPLE and UNTAGGED Task Attribute commands
+		 */
+		atomic_inc(&cmd->se_dev->simple_cmds);
+		smp_mb__after_atomic_inc();
+	}
+	/*
+	 * If one or more outstanding ORDERED task attributes exist, the
+	 * dormant task(s) built for the passed struct se_cmd cannot become
+	 * Active yet on this struct se_device; park the command on the
+	 * delayed list below instead.
+	 */
+	if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
+		/*
+		 * Add cmd w/ tasks to the delayed cmd queue that
+		 * will be drained upon completion of HEAD_OF_QUEUE task.
+		 */
+		spin_lock(&cmd->se_dev->delayed_cmd_lock);
+		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+		list_add_tail(&cmd->se_delayed_node,
+				&cmd->se_dev->delayed_cmd_list);
+		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
+
+		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+			" delayed CMD list, se_ordered_id: %u\n",
+			cmd->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->se_ordered_id);
+		/*
+		 * Return zero to let transport_execute_tasks() know
+		 * not to add the delayed tasks to the execution list.
+		 */
+		return 0;
+	}
+	/*
+	 * Otherwise, no ORDERED task attributes exist.
+	 */
+	return 1;
+}
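+
+/*
+ * Summary of transport_execute_task_attr() above: HEAD_OF_QUEUE commands
+ * always return 1 (execute now); ORDERED commands return 1 only when no
+ * SIMPLE commands are still outstanding; anything else that finds
+ * dev->dev_ordered_sync non-zero is parked on dev->delayed_cmd_list and 0
+ * is returned, leaving the command to be drained by
+ * transport_complete_task_attr() at I/O completion.
+ */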
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+	int add_tasks;
+	struct se_device *se_dev = cmd->se_dev;
+	/*
+	 * Call transport_cmd_check_stop() to see if a fabric exception
+	 * has occurred that prevents execution.
+	 */
+	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
+		/*
+		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+		 * attribute for the tasks of the received struct se_cmd CDB
+		 */
+		add_tasks = transport_execute_task_attr(cmd);
+		if (!add_tasks)
+			goto execute_tasks;
+		/*
+		 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+		 * adds associated se_tasks while holding dev->execute_task_lock
+		 * before I/O dispatch to avoid a double spinlock access.
+		 */
+		__transport_execute_tasks(se_dev, cmd);
+		return 0;
+	}
+
+execute_tasks:
+	__transport_execute_tasks(se_dev, NULL);
+	return 0;
+}
+
+/*
+ * Called to check the struct se_device TCQ depth window, and once open, pull
+ * struct se_task entries from struct se_device->execute_task_list and
+ * dispatch them to the backend.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
+{
+	int error;
+	struct se_cmd *cmd = NULL;
+	struct se_task *task = NULL;
+	unsigned long flags;
+
+check_depth:
+	spin_lock_irq(&dev->execute_task_lock);
+	if (new_cmd != NULL)
+		__transport_add_tasks_from_cmd(new_cmd);
+
+	if (list_empty(&dev->execute_task_list)) {
+		spin_unlock_irq(&dev->execute_task_lock);
+		return 0;
+	}
+	task = list_first_entry(&dev->execute_task_list,
+				struct se_task, t_execute_list);
+	__transport_remove_task_from_execute_queue(task, dev);
+	spin_unlock_irq(&dev->execute_task_lock);
+
+	cmd = task->task_se_cmd;
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	task->task_flags |= (TF_ACTIVE | TF_SENT);
+	atomic_inc(&cmd->t_task_cdbs_sent);
+
+	if (atomic_read(&cmd->t_task_cdbs_sent) ==
+	    cmd->t_task_list_num)
+		cmd->transport_state |= CMD_T_SENT;
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (cmd->execute_task)
+		error = cmd->execute_task(task);
+	else
+		error = dev->transport->do_task(task);
+	if (error != 0) {
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		task->task_flags &= ~TF_ACTIVE;
+		cmd->transport_state &= ~CMD_T_SENT;
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		transport_stop_tasks_for_cmd(cmd);
+		transport_generic_request_failure(cmd);
+	}
+
+	new_cmd = NULL;
+	goto check_depth;
+
+	return 0;
+}
+
+static inline u32 transport_get_sectors_6(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 8-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 8-bit sector value.  SBC-3 says:
+	 *
+	 *   A TRANSFER LENGTH field set to zero specifies that 256
+	 *   logical blocks shall be written.  Any other value
+	 *   specifies the number of logical blocks that shall be
+	 *   written.
+	 */
+type_disk:
+	return cdb[4] ? : 256;
+}
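+
+/*
+ * Example of the SBC-3 rule applied in transport_get_sectors_6() above:
+ * a six-byte CDB with cdb[4] == 0x10 transfers 16 blocks, while
+ * cdb[4] == 0x00 transfers the full 256 blocks (the "?:" keeps the
+ * non-zero value and substitutes 256 otherwise).
+ */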
+
+static inline u32 transport_get_sectors_10(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 16-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_10 is not defined in SSC, throw an exception
+	 */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -EINVAL;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 16-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_12 is not defined in SSC, throw an exception
+	 */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -EINVAL;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 32-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+		    (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+		    (cdb[30] << 8) + cdb[31];
+
+}
+
+static inline u32 transport_get_size(
+	u32 sectors,
+	unsigned char *cdb,
+	struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+		if (cdb[1] & 1) { /* sectors */
+			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+		} else /* bytes */
+			return sectors;
+	}
+#if 0
+	pr_debug("Returning block_size: %u, sectors: %u == %u for"
+			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
+			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+			dev->transport->name);
+#endif
+	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+}
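+
+/*
+ * Example for transport_get_size() above with a TYPE_DISK backend using a
+ * 512-byte block_size attribute: 8 sectors map to an 8 * 512 == 4096 byte
+ * expected transfer length.  For TYPE_TAPE with the fixed bit (cdb[1] & 1)
+ * clear, 'sectors' is already a byte count and is returned unchanged.
+ */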
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+	unsigned char *buf, *addr;
+	struct scatterlist *sg;
+	unsigned int offset;
+	int i;
+	int count;
+	/*
+	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+	 *
+	 * 1) read the specified logical block(s);
+	 * 2) transfer logical blocks from the data-out buffer;
+	 * 3) XOR the logical blocks transferred from the data-out buffer with
+	 *    the logical blocks read, storing the resulting XOR data in a buffer;
+	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+	 *    blocks transferred from the data-out buffer; and
+	 * 5) transfer the resulting XOR data to the data-in buffer.
+	 */
+	buf = kmalloc(cmd->data_length, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Unable to allocate xor_callback buf\n");
+		return;
+	}
+	/*
+	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
+	 * into the locally allocated *buf
+	 */
+	sg_copy_to_buffer(cmd->t_data_sg,
+			  cmd->t_data_nents,
+			  buf,
+			  cmd->data_length);
+
+	/*
+	 * Now perform the XOR against the BIDI read memory located at
+	 * cmd->t_mem_bidi_list
+	 */
+
+	offset = 0;
+	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+		addr = kmap_atomic(sg_page(sg));
+		if (!addr)
+			goto out;
+
+		for (i = 0; i < sg->length; i++)
+			*(addr + sg->offset + i) ^= *(buf + offset + i);
+
+		offset += sg->length;
+		kunmap_atomic(addr);
+	}
+
+out:
+	kfree(buf);
+}
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+	struct se_device *dev = cmd->se_dev;
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+	u32 offset = 0;
+
+	WARN_ON(!cmd->se_lun);
+
+	if (!dev)
+		return 0;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return 0;
+	}
+
+	list_for_each_entry_safe(task, task_tmp,
+				&cmd->t_task_list, t_list) {
+		if (!(task->task_flags & TF_HAS_SENSE))
+			continue;
+
+		if (!dev->transport->get_sense_buffer) {
+			pr_err("dev->transport->get_sense_buffer"
+					" is NULL\n");
+			continue;
+		}
+
+		sense_buffer = dev->transport->get_sense_buffer(task);
+		if (!sense_buffer) {
+			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"
+				" sense buffer for task with sense\n",
+				cmd->se_tfo->get_task_tag(cmd), task);
+			continue;
+		}
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+
+		memcpy(&buffer[offset], sense_buffer,
+				TRANSPORT_SENSE_BUFFER);
+		cmd->scsi_status = task->task_scsi_status;
+		/* Automatically padded */
+		cmd->scsi_sense_length =
+				(TRANSPORT_SENSE_BUFFER + offset);
+
+		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+				" and sense\n",
+			dev->se_hba->hba_id, dev->transport->name,
+				cmd->scsi_status);
+		return 0;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return -1;
+}
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+	return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	u32 sectors;
+
+	if (dev->transport->get_device_type(dev) != TYPE_DISK)
+		return 0;
+
+	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+		pr_err("LBA: %llu Sectors: %u exceeds"
+			" transport_dev_end_lba(): %llu\n",
+			cmd->t_task_lba, sectors,
+			transport_dev_end_lba(dev));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
+{
+	/*
+	 * Determine if the received WRITE_SAME is used for direct
+	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
+	 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
+	 * emulation for Linux/BLOCK discard with TCM/IBLOCK code.
+	 */
+	int passthrough = (dev->transport->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+
+	if (!passthrough) {
+		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+			pr_err("WRITE_SAME PBDATA and LBDATA"
+				" bits not supported for Block Discard"
+				" Emulation\n");
+			return -ENOSYS;
+		}
+		/*
+		 * Currently for the emulated case we only accept
+		 * tpws with the UNMAP=1 bit set.
+		 */
+		if (!(flags[0] & 0x08)) {
+			pr_err("WRITE_SAME w/o UNMAP bit not"
+				" supported for Block Discard Emulation\n");
+			return -ENOSYS;
+		}
+	}
+
+	return 0;
+}
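+
+/*
+ * Bit layout assumed by target_check_write_same_discard() above in the
+ * WRITE SAME CDB byte it is handed: 0x08 is the UNMAP bit, 0x04 PBDATA and
+ * 0x02 LBDATA.  The emulated (non-pSCSI) path therefore only accepts
+ * UNMAP=1 with PBDATA=0 and LBDATA=0.
+ */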
+
+/*	transport_generic_cmd_sequencer():
+ *
+ *	Generic Command Sequencer that should work for most DAS transport
+ *	drivers.
+ *
+ *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	RX Thread.
+ *
+ *	FIXME: Need to support other SCSI OPCODES here as well.
+ */
+static int transport_generic_cmd_sequencer(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	int ret = 0, sector_ret = 0, passthrough;
+	u32 sectors = 0, size = 0, pr_reg_type = 0;
+	u16 service_action;
+	u8 alua_ascq = 0;
+	/*
+	 * Check for an existing UNIT ATTENTION condition
+	 */
+	if (core_scsi3_ua_check(cmd, cdb) < 0) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+		return -EINVAL;
+	}
+	/*
+	 * Check status of Asymmetric Logical Unit Assignment port
+	 */
+	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
+	if (ret != 0) {
+		/*
+		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+		 * The ALUA additional sense code qualifier (ASCQ) is determined
+		 * by the ALUA primary or secondary access state.
+		 */
+		if (ret > 0) {
+#if 0
+			pr_debug("[%s]: ALUA TG Port not available,"
+				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+				cmd->se_tfo->get_fabric_name(), alua_ascq);
+#endif
+			transport_set_sense_codes(cmd, 0x04, alua_ascq);
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+			return -EINVAL;
+		}
+		goto out_invalid_cdb_field;
+	}
+	/*
+	 * Check status for SPC-3 Persistent Reservations
+	 */
+	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
+		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
+					cmd, cdb, pr_reg_type) != 0) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+			return -EBUSY;
+		}
+		/*
+		 * This means the CDB is allowed for the SCSI Initiator port
+		 * when said port is *NOT* holding the legacy SPC-2 or
+		 * SPC-3 Persistent Reservation.
+		 */
+	}
+
+	/*
+	 * If we operate in passthrough mode we skip most CDB emulation and
+	 * instead hand the commands down to the physical SCSI device.
+	 */
+	passthrough =
+		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
+
+	switch (cdb[0]) {
+	case READ_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_64(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_64(cdb);
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case XDWRITEREAD_10:
+		if ((cmd->data_direction != DMA_TO_DEVICE) ||
+		    !(cmd->se_cmd_flags & SCF_BIDI))
+			goto out_invalid_cdb_field;
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+		/*
+		 * Do not allow BIDI commands for passthrough mode.
+		 */
+		if (passthrough)
+			goto out_unsupported_cdb;
+
+		/*
+		 * Setup BIDI XOR callback to be run after I/O completion.
+		 */
+		cmd->transport_complete_callback = &transport_xor_callback;
+		if (cdb[1] & 0x8)
+			cmd->se_cmd_flags |= SCF_FUA;
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action = get_unaligned_be16(&cdb[8]);
+		switch (service_action) {
+		case XDWRITEREAD_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			/*
+			 * Use WRITE_32 and READ_32 opcodes for the emulated
+			 * XDWRITE_READ_32 logic.
+			 */
+			cmd->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+			/*
+			 * Do not allow BIDI commands for passthrough mode.
+			 */
+			if (passthrough)
+				goto out_unsupported_cdb;
+
+			/*
+			 * Setup BIDI XOR callback to be run after I/O
+			 * completion.
+			 */
+			cmd->transport_complete_callback = &transport_xor_callback;
+			if (cdb[1] & 0x8)
+				cmd->se_cmd_flags |= SCF_FUA;
+			break;
+		case WRITE_SAME_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+
+			if (sectors)
+				size = transport_get_size(1, cdb, cmd);
+			else {
+				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+				       " supported\n");
+				goto out_invalid_cdb_field;
+			}
+
+			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+			if (target_check_write_same_discard(&cdb[10], dev) < 0)
+				goto out_unsupported_cdb;
+			if (!passthrough)
+				cmd->execute_task = target_emulate_write_same;
+			break;
+		default:
+			pr_err("VARIABLE_LENGTH_CMD service action"
+				" 0x%04x not supported\n", service_action);
+			goto out_unsupported_cdb;
+		}
+		break;
+	case MAINTENANCE_IN:
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_IN from SCC-2 */
+			/*
+			 * Check for emulated MI_REPORT_TARGET_PGS.
+			 */
+			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
+			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+				cmd->execute_task =
+					target_emulate_report_target_port_groups;
+			}
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else {
+			/* GPCMD_SEND_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SELECT:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SELECT_10:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_modesense;
+		break;
+	case MODE_SENSE_10:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_modesense;
+		break;
+	case GPCMD_READ_BUFFER_CAPACITY:
+	case GPCMD_SEND_OPC:
+	case LOG_SELECT:
+	case LOG_SENSE:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_BLOCK_LIMITS:
+		size = READ_BLOCK_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case GPCMD_GET_CONFIGURATION:
+	case GPCMD_READ_FORMAT_CAPACITIES:
+	case GPCMD_READ_DISC_INFO:
+	case GPCMD_READ_TRACK_RZONE_INFO:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case PERSISTENT_RESERVE_IN:
+		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+			cmd->execute_task = target_scsi3_emulate_pr_in;
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+			cmd->execute_task = target_scsi3_emulate_pr_out;
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case GPCMD_MECHANISM_STATUS:
+	case GPCMD_READ_DVD_STRUCTURE:
+		size = (cdb[8] << 8) + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_POSITION:
+		size = READ_POSITION_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MAINTENANCE_OUT:
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_OUT from SCC-2
+			 *
+			 * Check for emulated MO_SET_TARGET_PGS.
+			 */
+			if (cdb[1] == MO_SET_TARGET_PGS &&
+			    su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+				cmd->execute_task =
+					target_emulate_set_target_port_groups;
+			}
+
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else  {
+			/* GPCMD_REPORT_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case INQUIRY:
+		size = (cdb[3] << 8) + cdb[4];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+		 * See spc4r17 section 5.3
+		 */
+		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_inquiry;
+		break;
+	case READ_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_CAPACITY:
+		size = READ_CAP_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_readcapacity;
+		break;
+	case READ_MEDIA_SERIAL_NUMBER:
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case SERVICE_ACTION_IN:
+		switch (cmd->t_task_cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			if (!passthrough)
+				cmd->execute_task =
+					target_emulate_readcapacity_16;
+			break;
+		default:
+			if (passthrough)
+				break;
+
+			pr_err("Unsupported SA: 0x%02x\n",
+				cmd->t_task_cdb[1] & 0x1f);
+			goto out_invalid_cdb_field;
+		}
+		/*FALLTHROUGH*/
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case EXTENDED_COPY:
+	case READ_ATTRIBUTE:
+	case RECEIVE_COPY_RESULTS:
+	case WRITE_ATTRIBUTE:
+		size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		size = (cdb[3] << 8) | cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+	case GPCMD_READ_CD:
+		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		size = (2336 * sectors);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+#endif
+	case READ_TOC:
+		size = cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case REQUEST_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_request_sense;
+		break;
+	case READ_ELEMENT_STATUS:
+		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case WRITE_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/*
+		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RESERVE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		/*
+		 * Setup the legacy emulated handler for SPC-2 and
+		 * >= SPC-3 compatible reservation handling (CRH=1).
+		 * Otherwise, we assume the underlying SCSI logic is
+		 * running in SPC_PASSTHROUGH, and wants reservation
+		 * emulation disabled.
+		 */
+		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+			cmd->execute_task = target_scsi2_reservation_reserve;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/*
+		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		*/
+		if (cdb[0] == RELEASE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+			cmd->execute_task = target_scsi2_reservation_release;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case SYNCHRONIZE_CACHE:
+	case SYNCHRONIZE_CACHE_16:
+		/*
+		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+		 */
+		if (cdb[0] == SYNCHRONIZE_CACHE) {
+			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+		if (passthrough)
+			break;
+
+		/*
+		 * Check to ensure that LBA + Range does not extend past the end
+		 * of the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
+		 */
+		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
+			if (transport_cmd_get_valid_sectors(cmd) < 0)
+				goto out_invalid_cdb_field;
+		}
+		cmd->execute_task = target_emulate_synchronize_cache;
+		break;
+	case UNMAP:
+		size = get_unaligned_be16(&cdb[7]);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_unmap;
+		break;
+	case WRITE_SAME_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		if (sectors)
+			size = transport_get_size(1, cdb, cmd);
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			goto out_invalid_cdb_field;
+		}
+
+		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+		if (target_check_write_same_discard(&cdb[1], dev) < 0)
+			goto out_unsupported_cdb;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_write_same;
+		break;
+	case WRITE_SAME:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		if (sectors)
+			size = transport_get_size(1, cdb, cmd);
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			goto out_invalid_cdb_field;
+		}
+
+		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		/*
+		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+		 * of byte 1 bit 3 UNMAP instead of original reserved field
+		 */
+		if (target_check_write_same_discard(&cdb[1], dev) < 0)
+			goto out_unsupported_cdb;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_write_same;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		if (!passthrough)
+			cmd->execute_task = target_emulate_noop;
+		break;
+	case GPCMD_CLOSE_TRACK:
+	case INITIALIZE_ELEMENT_STATUS:
+	case GPCMD_LOAD_UNLOAD:
+	case GPCMD_SET_SPEED:
+	case MOVE_MEDIUM:
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case REPORT_LUNS:
+		cmd->execute_task = target_report_luns;
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+		 * See spc4r17 section 5.3
+		 */
+		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = MSG_HEAD_TAG;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	default:
+		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
+			" 0x%02x, sending CHECK_CONDITION.\n",
+			cmd->se_tfo->get_fabric_name(), cdb[0]);
+		goto out_unsupported_cdb;
+	}
+
+	if (size != cmd->data_length) {
+		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
+			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+				cmd->data_length, size, cdb[0]);
+
+		cmd->cmd_spdtl = size;
+
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			pr_err("Rejecting underflow/overflow"
+					" WRITE data\n");
+			goto out_invalid_cdb_field;
+		}
+		/*
+		 * Reject READ_* or WRITE_* with overflow/underflow for
+		 * type SCF_SCSI_DATA_SG_IO_CDB.
+		 */
+		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
+			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
+				" CDB on non 512-byte sector setup subsystem"
+				" plugin: %s\n", dev->transport->name);
+			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+			goto out_invalid_cdb_field;
+		}
+		/*
+		 * For the overflow case keep the existing fabric provided
+		 * ->data_length.  Otherwise for the underflow case, reset
+		 * ->data_length to the smaller SCSI expected data transfer
+		 * length.
+		 */
+		if (size > cmd->data_length) {
+			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+			cmd->residual_count = (size - cmd->data_length);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - size);
+			cmd->data_length = size;
+		}
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
+	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
+				   cdb[0], sectors);
+		goto out_invalid_cdb_field;
+	}
+
+	/* reject any command that we don't have a handler for */
+	if (!(passthrough || cmd->execute_task ||
+	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+		goto out_unsupported_cdb;
+
+	transport_set_supported_SAM_opcode(cmd);
+	return ret;
+
+out_unsupported_cdb:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+	return -EINVAL;
+out_invalid_cdb_field:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+	return -EINVAL;
+}
+
+/*
+ * Called from I/O completion to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_cmd *cmd_p, *cmd_tmp;
+	int new_active_tasks = 0;
+
+	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
+		atomic_dec(&dev->simple_cmds);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
+			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev_cur_ordered_id: %u for"
+			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+		atomic_dec(&dev->dev_ordered_sync);
+		smp_mb__after_atomic_dec();
+
+		dev->dev_cur_ordered_id++;
+		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
+			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+	}
+	/*
+	 * Process all commands up to the last received
+	 * ORDERED task attribute which requires another blocking
+	 * boundary
+	 */
+	spin_lock(&dev->delayed_cmd_lock);
+	list_for_each_entry_safe(cmd_p, cmd_tmp,
+			&dev->delayed_cmd_list, se_delayed_node) {
+
+		list_del(&cmd_p->se_delayed_node);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		pr_debug("Calling add_tasks() for"
+			" cmd_p: 0x%02x Task Attr: 0x%02x"
+			" Dormant -> Active, se_ordered_id: %u\n",
+			cmd_p->t_task_cdb[0],
+			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+		transport_add_tasks_from_cmd(cmd_p);
+		new_active_tasks++;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
+			break;
+	}
+	spin_unlock(&dev->delayed_cmd_lock);
+	/*
+	 * If new tasks have become active, wake up the transport thread
+	 * to do the processing of the Active tasks.
+	 */
+	if (new_active_tasks != 0)
+		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+}
+
+static void transport_complete_qf(struct se_cmd *cmd)
+{
+	int ret = 0;
+
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		ret = cmd->se_tfo->queue_status(cmd);
+		goto out;
+	}
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		ret = cmd->se_tfo->queue_data_in(cmd);
+		break;
+	case DMA_TO_DEVICE:
+		if (cmd->t_bidi_data_sg) {
+			ret = cmd->se_tfo->queue_data_in(cmd);
+			if (ret < 0)
+				break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		ret = cmd->se_tfo->queue_status(cmd);
+		break;
+	default:
+		break;
+	}
+
+out:
+	if (ret < 0) {
+		transport_handle_queue_full(cmd, cmd->se_dev);
+		return;
+	}
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void transport_handle_queue_full(
+	struct se_cmd *cmd,
+	struct se_device *dev)
+{
+	spin_lock_irq(&dev->qf_cmd_lock);
+	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
+	atomic_inc(&dev->dev_qf_count);
+	smp_mb__after_atomic_inc();
+	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+
+	schedule_work(&cmd->se_dev->qf_work_queue);
+}
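+
+/*
+ * QUEUE_FULL handling sketch: when a fabric ->queue_data_in()/->queue_status()
+ * call returns -EAGAIN or -ENOMEM, the command is parked on dev->qf_cmd_list
+ * by transport_handle_queue_full() above and dev->qf_work_queue is scheduled;
+ * the worker (target_qf_do_work(), set up at device creation) is expected to
+ * retry delivery via transport_complete_qf().
+ */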
+
+static void target_complete_ok_work(struct work_struct *work)
+{
+	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+	int reason = 0, ret;
+
+	/*
+	 * Check if we need to move delayed/dormant tasks from cmds on the
+	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+	 * Attribute.
+	 */
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+	/*
+	 * Check to schedule QUEUE_FULL work, or execute an existing
+	 * cmd->transport_qf_callback()
+	 */
+	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+		schedule_work(&cmd->se_dev->qf_work_queue);
+
+	/*
+	 * Check if we need to retrieve a sense buffer from
+	 * the struct se_cmd in question.
+	 */
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		if (transport_get_sense_data(cmd) < 0)
+			reason = TCM_NON_EXISTENT_LUN;
+
+		/*
+		 * Only set when a struct se_task->task_scsi_status returned
+		 * a non-GOOD status.
+		 */
+		if (cmd->scsi_status) {
+			ret = transport_send_check_condition_and_sense(
+					cmd, reason, 1);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+	}
+	/*
+	 * Check for a callback, used by amongst other things
+	 * XDWRITE_READ_10 emulation.
+	 */
+	if (cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd);
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (cmd->se_lun->lun_sep) {
+			cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+
+		ret = cmd->se_tfo->queue_data_in(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		break;
+	case DMA_TO_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (cmd->se_lun->lun_sep) {
+			cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
+				cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Check if we need to send READ payload for BIDI-COMMAND
+		 */
+		if (cmd->t_bidi_data_sg) {
+			spin_lock(&cmd->se_lun->lun_sep_lock);
+			if (cmd->se_lun->lun_sep) {
+				cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+			}
+			spin_unlock(&cmd->se_lun->lun_sep_lock);
+			ret = cmd->se_tfo->queue_data_in(cmd);
+			if (ret == -EAGAIN || ret == -ENOMEM)
+				goto queue_full;
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret == -EAGAIN || ret == -ENOMEM)
+			goto queue_full;
+		break;
+	default:
+		break;
+	}
+
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+	return;
+
+queue_full:
+	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+		" data_direction: %d\n", cmd, cmd->data_direction);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+}
+
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	LIST_HEAD(dispose_list);
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&cmd->t_task_list, t_list) {
+		if (!(task->task_flags & TF_ACTIVE))
+			list_move_tail(&task->t_list, &dispose_list);
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	while (!list_empty(&dispose_list)) {
+		task = list_first_entry(&dispose_list, struct se_task, t_list);
+
+		if (task->task_sg != cmd->t_data_sg &&
+		    task->task_sg != cmd->t_bidi_data_sg)
+			kfree(task->task_sg);
+
+		list_del(&task->t_list);
+
+		cmd->se_dev->transport->free_task(task);
+	}
+}
+
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+{
+	struct scatterlist *sg;
+	int count;
+
+	for_each_sg(sgl, sg, nents, count)
+		__free_page(sg_page(sg));
+
+	kfree(sgl);
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+		return;
+
+	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+	cmd->t_data_sg = NULL;
+	cmd->t_data_nents = 0;
+
+	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+	cmd->t_bidi_data_sg = NULL;
+	cmd->t_bidi_data_nents = 0;
+}
+
+/**
+ * transport_release_cmd - free a command
+ * @cmd:       command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+	BUG_ON(!cmd->se_tfo);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
+	/*
+	 * If this cmd has been setup with target_get_sess_cmd(), drop
+	 * the kref and call ->release_cmd() in kref callback.
+	 */
+	 if (cmd->check_release != 0) {
+		target_put_sess_cmd(cmd->se_sess, cmd);
+		return;
+	}
+	cmd->se_tfo->release_cmd(cmd);
+}
+
+/**
+ * transport_put_cmd - release a reference to a command
+ * @cmd:       command to release
+ *
+ * This routine releases our reference to the command and frees it if possible.
+ */
+static void transport_put_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	int free_tasks = 0;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (atomic_read(&cmd->t_fe_count)) {
+		if (!atomic_dec_and_test(&cmd->t_fe_count))
+			goto out_busy;
+	}
+
+	if (atomic_read(&cmd->t_se_count)) {
+		if (!atomic_dec_and_test(&cmd->t_se_count))
+			goto out_busy;
+	}
+
+	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+		transport_all_task_dev_remove_state(cmd);
+		free_tasks = 1;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (free_tasks != 0)
+		transport_free_dev_tasks(cmd);
+
+	transport_free_pages(cmd);
+	transport_release_cmd(cmd);
+	return;
+out_busy:
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Use fabric-allocated pages instead of
+ * allocating in the core.
+ * @cmd:  Associated se_cmd descriptor
+ * @sgl:  SGL style memory for TCM WRITE / READ
+ * @sgl_count: Number of SGL elements
+ * @sgl_bidi: SGL style memory for TCM BIDI READ
+ * @sgl_bidi_count: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero if the cmd was rejected for -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+	struct se_cmd *cmd,
+	struct scatterlist *sgl,
+	u32 sgl_count,
+	struct scatterlist *sgl_bidi,
+	u32 sgl_bidi_count)
+{
+	if (!sgl || !sgl_count)
+		return 0;
+
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+		 * scatterlists already have been set to follow what the fabric
+		 * passes for the original expected data transfer length.
+		 */
+		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			pr_warn("Rejecting SCSI DATA overflow for fabric using"
+				" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+			return -EINVAL;
+		}
+
+		cmd->t_data_sg = sgl;
+		cmd->t_data_nents = sgl_count;
+
+		if (sgl_bidi && sgl_bidi_count) {
+			cmd->t_bidi_data_sg = sgl_bidi;
+			cmd->t_bidi_data_nents = sgl_bidi_count;
+		}
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
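For context, this is the entry point a fabric such as tcm_loop uses to hand the core a scatterlist it already owns, so that transport_generic_new_cmd() skips its own page allocation. A minimal sketch of the calling pattern follows; the wrapper name and error handling are hypothetical, only the two exported calls are taken from this file:

	static int example_fabric_submit_data(struct se_cmd *se_cmd,
					      struct scatterlist *sgl, u32 sgl_count)
	{
		int ret;

		/* Attach the fabric-owned SGL so the core does not allocate pages */
		ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, NULL, 0);
		if (ret < 0)
			return ret;

		/* Build tasks and queue the command for execution */
		return transport_generic_new_cmd(se_cmd);
	}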
+
+void *transport_kmap_data_sg(struct se_cmd *cmd)
+{
+	struct scatterlist *sg = cmd->t_data_sg;
+	struct page **pages;
+	int i;
+
+	BUG_ON(!sg);
+	/*
+	 * We need to take into account a possible offset here for fabrics like
+	 * tcm_loop, which may be using a contiguous buffer from the SCSI
+	 * midlayer for control CDBs passed as SGLs via
+	 * transport_generic_map_mem_to_cmd().
+	 */
+	if (!cmd->t_data_nents)
+		return NULL;
+	else if (cmd->t_data_nents == 1)
+		return kmap(sg_page(sg)) + sg->offset;
+
+	/* >1 page. use vmap */
+	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	/* convert sg[] to pages[] */
+	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+		pages[i] = sg_page(sg);
+	}
+
+	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
+	kfree(pages);
+	if (!cmd->t_data_vmap)
+		return NULL;
+
+	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
+}
+EXPORT_SYMBOL(transport_kmap_data_sg);
+
+void transport_kunmap_data_sg(struct se_cmd *cmd)
+{
+	if (!cmd->t_data_nents) {
+		return;
+	} else if (cmd->t_data_nents == 1) {
+		kunmap(sg_page(cmd->t_data_sg));
+		return;
+	}
+
+	vunmap(cmd->t_data_vmap);
+	cmd->t_data_vmap = NULL;
+}
+EXPORT_SYMBOL(transport_kunmap_data_sg);
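These two helpers give emulation code a linear view of the (possibly multi-page) data SGL: a single page is kmap()ed directly, anything larger goes through vmap(). A short sketch of the expected pairing, with a hypothetical helper name; note the NULL check, since a zero-length payload or a failed vmap() returns NULL:

	static void example_fill_first_byte(struct se_cmd *cmd, unsigned char val)
	{
		unsigned char *buf;

		buf = transport_kmap_data_sg(cmd);
		if (!buf)
			return;

		buf[0] = val;			/* touch the linearized payload */

		transport_kunmap_data_sg(cmd);	/* always undo the mapping */
	}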
+
+static int
+transport_generic_get_mem(struct se_cmd *cmd)
+{
+	u32 length = cmd->data_length;
+	unsigned int nents;
+	struct page *page;
+	gfp_t zero_flag;
+	int i = 0;
+
+	nents = DIV_ROUND_UP(length, PAGE_SIZE);
+	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
+	if (!cmd->t_data_sg)
+		return -ENOMEM;
+
+	cmd->t_data_nents = nents;
+	sg_init_table(cmd->t_data_sg, nents);
+
+	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+
+	while (length) {
+		u32 page_len = min_t(u32, length, PAGE_SIZE);
+		page = alloc_page(GFP_KERNEL | zero_flag);
+		if (!page)
+			goto out;
+
+		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
+	}
+	return 0;
+
+out:
+	while (i > 0) {
+		i--;
+		__free_page(sg_page(&cmd->t_data_sg[i]));
+	}
+	kfree(cmd->t_data_sg);
+	cmd->t_data_sg = NULL;
+	return -ENOMEM;
+}
+
+/* Reduce the sector count if it exceeds what the device allows */
+static inline sector_t transport_limit_task_sectors(
+	struct se_device *dev,
+	unsigned long long lba,
+	sector_t sectors)
+{
+	sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
+
+	if (dev->transport->get_device_type(dev) == TYPE_DISK)
+		if ((lba + sectors) > transport_dev_end_lba(dev))
+			sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+	return sectors;
+}
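Worked example of the clamping above: with max_sectors set to 128, a 200-sector request is first reduced to 128; if the device is TYPE_DISK and its last LBA is 1000, a 128-sector request starting at LBA 900 is further reduced to 101 sectors so that it ends exactly on the last addressable block.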
+
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+	struct scatterlist *sg_first = NULL;
+	struct scatterlist *sg_prev = NULL;
+	int sg_prev_nents = 0;
+	struct scatterlist *sg;
+	struct se_task *task;
+	u32 chained_nents = 0;
+	int i;
+
+	BUG_ON(!cmd->se_tfo->task_sg_chaining);
+
+	/*
+	 * Walk the struct se_task list and setup scatterlist chains
+	 * for each contiguously allocated struct se_task->task_sg[].
+	 */
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
+		if (!task->task_sg)
+			continue;
+
+		if (!sg_first) {
+			sg_first = task->task_sg;
+			chained_nents = task->task_sg_nents;
+		} else {
+			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
+			chained_nents += task->task_sg_nents;
+		}
+		/*
+		 * For the padded tasks, use the extra SGL vector allocated
+		 * in transport_allocate_data_tasks() for the sg_prev_nents
+		 * offset into sg_chain() above.
+		 *
+		 * We do not need the padding for the last task (or a single
+		 * task), but in that case we will never use the sg_prev_nents
+		 * value below which would be incorrect.
+		 */
+		sg_prev_nents = (task->task_sg_nents + 1);
+		sg_prev = task->task_sg;
+	}
+	/*
+	 * Setup the starting pointer and total t_tasks_sg_linked_no including
+	 * padding SGs for linking and to mark the end.
+	 */
+	cmd->t_tasks_sg_chained = sg_first;
+	cmd->t_tasks_sg_chained_no = chained_nents;
+
+	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
+		cmd->t_tasks_sg_chained_no);
+
+	for_each_sg(cmd->t_tasks_sg_chained, sg,
+			cmd->t_tasks_sg_chained_no, i) {
+
+		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
+			i, sg, sg_page(sg), sg->length, sg->offset);
+		if (sg_is_chain(sg))
+			pr_debug("SG: %p sg_is_chain=1\n", sg);
+		if (sg_is_last(sg))
+			pr_debug("SG: %p sg_is_last=1\n", sg);
+	}
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
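The chaining relies on the spare entry that transport_allocate_data_tasks() leaves at the end of each per-task SGL: sg_chain() turns that last slot into a link entry pointing at the next array, so a single for_each_sg() walk covers the whole command. A minimal sketch of that mechanism in isolation (hypothetical helper, standard scatterlist API):

	#include <linux/scatterlist.h>

	/* first[] must have been allocated with first_nents + 1 entries */
	static void example_chain_two_sgls(struct scatterlist *first,
					   unsigned int first_nents,
					   struct scatterlist *second)
	{
		/* The padding slot at index first_nents becomes the chain link */
		sg_chain(first, first_nents + 1, second);
	}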
+
+/*
+ * Break up cmd into chunks the transport can handle
+ */
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
+	enum dma_data_direction data_direction,
+	struct scatterlist *cmd_sg, unsigned int sgl_nents)
+{
+	struct se_device *dev = cmd->se_dev;
+	int task_count, i;
+	unsigned long long lba;
+	sector_t sectors, dev_max_sectors;
+	u32 sector_size;
+
+	if (transport_cmd_get_valid_sectors(cmd) < 0)
+		return -EINVAL;
+
+	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
+
+	WARN_ON(cmd->data_length % sector_size);
+
+	lba = cmd->t_task_lba;
+	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
+	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
+
+	/*
+	 * If we need just a single task reuse the SG list in the command
+	 * and avoid a lot of work.
+	 */
+	if (task_count == 1) {
+		struct se_task *task;
+		unsigned long flags;
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!task)
+			return -ENOMEM;
+
+		task->task_sg = cmd_sg;
+		task->task_sg_nents = sgl_nents;
+
+		task->task_lba = lba;
+		task->task_sectors = sectors;
+		task->task_size = task->task_sectors * sector_size;
+
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		list_add_tail(&task->t_list, &cmd->t_task_list);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		return task_count;
+	}
+
+	for (i = 0; i < task_count; i++) {
+		struct se_task *task;
+		unsigned int task_size, task_sg_nents_padded;
+		struct scatterlist *sg;
+		unsigned long flags;
+		int count;
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!task)
+			return -ENOMEM;
+
+		task->task_lba = lba;
+		task->task_sectors = min(sectors, dev_max_sectors);
+		task->task_size = task->task_sectors * sector_size;
+
+		/*
+		 * This assumes the passed sg_ents are in PAGE_SIZE chunks
+		 * in order to calculate the number of per-task SGL entries.
+		 */
+		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
+		/*
+		 * Check if the fabric module driver is requesting that all
+		 * struct se_task->task_sg[] be chained together.  If so,
+		 * allocate an extra padding SG entry for linking and
+		 * marking the end of the chained SGL for every task except
+		 * the last one when (task_count > 1); the extra padding is
+		 * skipped for the (task_count == 1) case.
+		 */
+		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
+			task_sg_nents_padded = (task->task_sg_nents + 1);
+		} else
+			task_sg_nents_padded = task->task_sg_nents;
+
+		task->task_sg = kmalloc(sizeof(struct scatterlist) *
+					task_sg_nents_padded, GFP_KERNEL);
+		if (!task->task_sg) {
+			cmd->se_dev->transport->free_task(task);
+			return -ENOMEM;
+		}
+
+		sg_init_table(task->task_sg, task_sg_nents_padded);
+
+		task_size = task->task_size;
+
+		/* Build new sgl, only up to task_size */
+		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
+			if (cmd_sg->length > task_size)
+				break;
+
+			*sg = *cmd_sg;
+			task_size -= cmd_sg->length;
+			cmd_sg = sg_next(cmd_sg);
+		}
+
+		lba += task->task_sectors;
+		sectors -= task->task_sectors;
+
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		list_add_tail(&task->t_list, &cmd->t_task_list);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+	}
+
+	return task_count;
+}
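Worked example of the split above: with a 512-byte block size, max_sectors of 128 and a 300-sector command, sectors = 300 and task_count = DIV_ROUND_UP(300, 128) = 3, producing tasks of 128, 128 and 44 sectors at ascending LBAs, each with its own slice of the command's scatterlist.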
+
+static int
+transport_allocate_control_task(struct se_cmd *cmd)
+{
+	struct se_task *task;
+	unsigned long flags;
+
+	/* Workaround for handling zero-length control CDBs */
+	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+	    !cmd->data_length)
+		return 0;
+
+	task = transport_generic_get_task(cmd, cmd->data_direction);
+	if (!task)
+		return -ENOMEM;
+
+	task->task_sg = cmd->t_data_sg;
+	task->task_size = cmd->data_length;
+	task->task_sg_nents = cmd->t_data_nents;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	/* Success! Return number of tasks allocated */
+	return 1;
+}
+
+/*
+ * Allocate any required resources to execute the command, and place it on
+ * the execution queue if possible.  For writes we might not have the
+ * payload yet, so notify the fabric via a call to ->write_pending instead.
+ */
+int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	int task_cdbs, task_cdbs_bidi = 0;
+	int set_counts = 1;
+	int ret = 0;
+
+	/*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory and is directly calling transport_generic_map_mem_to_cmd()
+	 * beforehand.
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+	    cmd->data_length) {
+		ret = transport_generic_get_mem(cmd);
+		if (ret < 0)
+			goto out_fail;
+	}
+
+	/*
+	 * For BIDI command set up the read tasks first.
+	 */
+	if (cmd->t_bidi_data_sg &&
+	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
+				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
+				cmd->t_bidi_data_nents);
+		if (task_cdbs_bidi <= 0)
+			goto out_fail;
+
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
+		set_counts = 0;
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		task_cdbs = transport_allocate_data_tasks(cmd,
+					cmd->data_direction, cmd->t_data_sg,
+					cmd->t_data_nents);
+	} else {
+		task_cdbs = transport_allocate_control_task(cmd);
+	}
+
+	if (task_cdbs < 0)
+		goto out_fail;
+	else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		spin_lock_irq(&cmd->t_state_lock);
+		cmd->t_state = TRANSPORT_COMPLETE;
+		cmd->transport_state |= CMD_T_ACTIVE;
+		spin_unlock_irq(&cmd->t_state_lock);
+
+		if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+			u8 ua_asc = 0, ua_ascq = 0;
+
+			core_scsi3_ua_clear_for_request_sense(cmd,
+					&ua_asc, &ua_ascq);
+		}
+
+		INIT_WORK(&cmd->work, target_complete_ok_work);
+		queue_work(target_completion_wq, &cmd->work);
+		return 0;
+	}
+
+	if (set_counts) {
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
+	}
+
+	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
+	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+
+	/*
+	 * For WRITEs, let the fabric know its buffer is ready.
+	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
+	 * will be added to the struct se_device execution queue after its WRITE
+	 * data has arrived (i.e. it gets handled by the transport processing
+	 * thread a second time).
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		transport_add_tasks_to_state_queue(cmd);
+		return transport_generic_write_pending(cmd);
+	}
+	/*
+	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+	 * to the execution queue.
+	 */
+	transport_execute_tasks(cmd);
+	return 0;
+
+out_fail:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -EINVAL;
+}
+EXPORT_SYMBOL(transport_generic_new_cmd);
+
+/*	transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+	transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+static void transport_write_pending_qf(struct se_cmd *cmd)
+{
+	int ret;
+
+	ret = cmd->se_tfo->write_pending(cmd);
+	if (ret == -EAGAIN || ret == -ENOMEM) {
+		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
+			 cmd);
+		transport_handle_queue_full(cmd, cmd->se_dev);
+	}
+}
+
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	/*
+	 * Clear the se_cmd for WRITE_PENDING status in order to set
+	 * CMD_T_ACTIVE so that transport_generic_handle_data can be called
+	 * from HW target mode interrupt code.  This is safe to be called
+	 * with transport_off=1 before the cmd->se_tfo->write_pending
+	 * because the se_cmd->se_lun pointer is not being cleared.
+	 */
+	transport_cmd_check_stop(cmd, 1, 0);
+
+	/*
+	 * Call the fabric write_pending function here to let the
+	 * frontend know that WRITE buffers are ready.
+	 */
+	ret = cmd->se_tfo->write_pending(cmd);
+	if (ret == -EAGAIN || ret == -ENOMEM)
+		goto queue_full;
+	else if (ret < 0)
+		return ret;
+
+	return 1;
+
+queue_full:
+	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+	transport_handle_queue_full(cmd, cmd->se_dev);
+	return 0;
+}
+
+void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+			 transport_wait_for_tasks(cmd);
+
+		transport_release_cmd(cmd);
+	} else {
+		if (wait_for_tasks)
+			transport_wait_for_tasks(cmd);
+
+		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+		if (cmd->se_lun)
+			transport_lun_remove_cmd(cmd);
+
+		transport_free_dev_tasks(cmd);
+
+		transport_put_cmd(cmd);
+	}
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+/* target_get_sess_cmd - Add command to active ->sess_cmd_list
+ * @se_sess:	session to reference
+ * @se_cmd:	command descriptor to add
+ * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
+ */
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+			bool ack_kref)
+{
+	unsigned long flags;
+
+	kref_init(&se_cmd->cmd_kref);
+	/*
+	 * Add a second kref if the fabric caller is expecting to handle
+	 * fabric acknowledgement that requires two target_put_sess_cmd()
+	 * invocations before se_cmd descriptor release.
+	 */
+	if (ack_kref == true) {
+		kref_get(&se_cmd->cmd_kref);
+		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+	}
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
+	se_cmd->check_release = 1;
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_get_sess_cmd);
+
+static void target_release_cmd_kref(struct kref *kref)
+{
+	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	if (list_empty(&se_cmd->se_cmd_list)) {
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		se_cmd->se_tfo->release_cmd(se_cmd);
+		return;
+	}
+	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		complete(&se_cmd->cmd_wait_comp);
+		return;
+	}
+	list_del(&se_cmd->se_cmd_list);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+	se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess:	session to reference
+ * @se_cmd:	command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+}
+EXPORT_SYMBOL(target_put_sess_cmd);
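Taken together, target_get_sess_cmd() and target_put_sess_cmd() give the fabric a per-command reference tied to the session's sess_cmd_list. A rough sketch of the pairing a fabric is expected to follow (function names hypothetical); with ack_kref set, a second put is required once the initiator acknowledges the response:

	static void example_cmd_arrived(struct se_session *se_sess, struct se_cmd *se_cmd)
	{
		/* one reference for the normal completion path */
		target_get_sess_cmd(se_sess, se_cmd, false);
	}

	static void example_cmd_response_sent(struct se_session *se_sess, struct se_cmd *se_cmd)
	{
		/* drops the reference; the final put invokes ->release_cmd() */
		target_put_sess_cmd(se_sess, se_cmd);
	}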
+
+/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
+ * @se_sess:	session to split
+ */
+void target_splice_sess_cmd_list(struct se_session *se_sess)
+{
+	struct se_cmd *se_cmd;
+	unsigned long flags;
+
+	WARN_ON(!list_empty(&se_sess->sess_wait_list));
+	INIT_LIST_HEAD(&se_sess->sess_wait_list);
+
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+	se_sess->sess_tearing_down = 1;
+
+	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+		se_cmd->cmd_wait_set = 1;
+
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+}
+EXPORT_SYMBOL(target_splice_sess_cmd_list);
+
+/* target_wait_for_sess_cmds - Wait for outstanding descriptors
+ * @se_sess:    session to wait for active I/O
+ * @wait_for_tasks:	Make extra transport_wait_for_tasks call
+ */
+void target_wait_for_sess_cmds(
+	struct se_session *se_sess,
+	int wait_for_tasks)
+{
+	struct se_cmd *se_cmd, *tmp_cmd;
+	bool rc = false;
+
+	list_for_each_entry_safe(se_cmd, tmp_cmd,
+				&se_sess->sess_wait_list, se_cmd_list) {
+		list_del(&se_cmd->se_cmd_list);
+
+		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+			" %d\n", se_cmd, se_cmd->t_state,
+			se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+		if (wait_for_tasks) {
+			pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d,"
+				" fabric state: %d\n", se_cmd, se_cmd->t_state,
+				se_cmd->se_tfo->get_cmd_state(se_cmd));
+
+			rc = transport_wait_for_tasks(se_cmd);
+
+			pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d,"
+				" fabric state: %d\n", se_cmd, se_cmd->t_state,
+				se_cmd->se_tfo->get_cmd_state(se_cmd));
+		}
+
+		if (!rc) {
+			wait_for_completion(&se_cmd->cmd_wait_comp);
+			pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+				" fabric state: %d\n", se_cmd, se_cmd->t_state,
+				se_cmd->se_tfo->get_cmd_state(se_cmd));
+		}
+
+		se_cmd->se_tfo->release_cmd(se_cmd);
+	}
+}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
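The intended teardown order for a fabric session is: splice the outstanding commands onto sess_wait_list, then wait for each of them before the session itself is torn down. A short sketch under that assumption (hypothetical wrapper name):

	static void example_close_session(struct se_session *se_sess)
	{
		target_splice_sess_cmd_list(se_sess);	/* marks sess_tearing_down */
		target_wait_for_sess_cmds(se_sess, 0);	/* blocks until all cmds complete */
		/* fabric-specific session release, e.g. transport_deregister_session(), follows */
	}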
+
+/*	transport_lun_wait_for_tasks():
+ *
+ *	Called from ConfigFS context to stop the passed struct se_cmd to allow
+ *	a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+	unsigned long flags;
+	int ret;
+	/*
+	 * If the frontend has already requested this struct se_cmd to
+	 * be stopped, we can safely ignore this struct se_cmd.
+	 */
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->transport_state & CMD_T_STOP) {
+		cmd->transport_state &= ~CMD_T_LUN_STOP;
+
+		pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
+			 cmd->se_tfo->get_task_tag(cmd));
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		transport_cmd_check_stop(cmd, 1, 0);
+		return -EPERM;
+	}
+	cmd->transport_state |= CMD_T_LUN_FE_STOP;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
+
+	ret = transport_stop_tasks_for_cmd(cmd);
+
+	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+			" %d\n", cmd, cmd->t_task_list_num, ret);
+	if (!ret) {
+		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+				cmd->se_tfo->get_task_tag(cmd));
+		wait_for_completion(&cmd->transport_lun_stop_comp);
+		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+				cmd->se_tfo->get_task_tag(cmd));
+	}
+	transport_remove_cmd_from_queue(cmd);
+
+	return 0;
+}
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct se_cmd *cmd = NULL;
+	unsigned long lun_flags, cmd_flags;
+	/*
+	 * Do exception processing and return CHECK_CONDITION status to the
+	 * Initiator Port.
+	 */
+	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	while (!list_empty(&lun->lun_cmd_list)) {
+		cmd = list_first_entry(&lun->lun_cmd_list,
+		       struct se_cmd, se_lun_node);
+		list_del_init(&cmd->se_lun_node);
+
+		/*
+		 * This will notify iscsi_target_transport.c:
+		 * transport_cmd_check_stop() that a LUN shutdown is in
+		 * progress for the iscsi_cmd_t.
+		 */
+		spin_lock(&cmd->t_state_lock);
+		pr_debug("SE_LUN[%d] - Setting cmd->transport"
+			"_lun_stop for  ITT: 0x%08x\n",
+			cmd->se_lun->unpacked_lun,
+			cmd->se_tfo->get_task_tag(cmd));
+		cmd->transport_state |= CMD_T_LUN_STOP;
+		spin_unlock(&cmd->t_state_lock);
+
+		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+		if (!cmd->se_lun) {
+			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
+				cmd->se_tfo->get_task_tag(cmd),
+				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		/*
+		 * If the Storage engine still owns the iscsi_cmd_t, determine
+		 * and/or stop its context.
+		 */
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
+			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
+			cmd->se_tfo->get_task_tag(cmd));
+
+		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+			"_wait_for_tasks(): SUCCESS\n",
+			cmd->se_lun->unpacked_lun,
+			cmd->se_tfo->get_task_tag(cmd));
+
+		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
+			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
+			goto check_cond;
+		}
+		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
+		transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
+
+		transport_free_dev_tasks(cmd);
+		/*
+		 * The Storage engine stopped this struct se_cmd before it was
+		 * sent to the fabric frontend for delivery back to the
+		 * Initiator Node.  Return this SCSI CDB back with a
+		 * CHECK_CONDITION status.
+		 */
+check_cond:
+		transport_send_check_condition_and_sense(cmd,
+				TCM_NON_EXISTENT_LUN, 0);
+		/*
+		 * If the fabric frontend is waiting for this iscsi_cmd_t to
+		 * be released, notify the waiting thread now that the LU has
+		 * finished accessing it.
+		 */
+		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+		if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
+			pr_debug("SE_LUN[%d] - Detected FE stop for"
+				" struct se_cmd: %p ITT: 0x%08x\n",
+				lun->unpacked_lun,
+				cmd, cmd->se_tfo->get_task_tag(cmd));
+
+			spin_unlock_irqrestore(&cmd->t_state_lock,
+					cmd_flags);
+			transport_cmd_check_stop(cmd, 1, 0);
+			complete(&cmd->transport_lun_fe_stop_comp);
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
+
+		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+	struct se_lun *lun = p;
+
+	__transport_clear_lun_from_sessions(lun);
+	complete(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct task_struct *kt;
+
+	kt = kthread_run(transport_clear_lun_thread, lun,
+			"tcm_cl_%u", lun->unpacked_lun);
+	if (IS_ERR(kt)) {
+		pr_err("Unable to start clear_lun thread\n");
+		return PTR_ERR(kt);
+	}
+	wait_for_completion(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd:	command to wait on
+ *
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return false;
+	}
+	/*
+	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
+	 * has been set in transport_set_supported_SAM_opcode().
+	 */
+	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return false;
+	}
+	/*
+	 * If we are already stopped due to an external event (ie: LUN shutdown)
+	 * sleep until the connection can have the passed struct se_cmd back.
+	 * The cmd->transport_lun_stopped_sem will be upped by
+	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
+	 * has completed its operation on the struct se_cmd.
+	 */
+	if (cmd->transport_state & CMD_T_LUN_STOP) {
+		pr_debug("wait_for_tasks: Stopping"
+			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
+			"_stop_comp); for ITT: 0x%08x\n",
+			cmd->se_tfo->get_task_tag(cmd));
+		/*
+		 * There is a special case for WRITES where a FE exception +
+		 * LUN shutdown means ConfigFS context is still sleeping on
+		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+		 * We go ahead and up transport_lun_stop_comp just to be sure
+		 * here.
+		 */
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		complete(&cmd->transport_lun_stop_comp);
+		wait_for_completion(&cmd->transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+
+		transport_all_task_dev_remove_state(cmd);
+		/*
+		 * At this point the frontend that originated this struct se_cmd
+		 * owns the structure again, and it can be released through
+		 * normal means below.
+		 */
+		pr_debug("wait_for_tasks: Stopped"
+			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
+			"stop_comp); for ITT: 0x%08x\n",
+			cmd->se_tfo->get_task_tag(cmd));
+
+		cmd->transport_state &= ~CMD_T_LUN_STOP;
+	}
+
+	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return false;
+	}
+
+	cmd->transport_state |= CMD_T_STOP;
+
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
+		" i_state: %d, t_state: %d, CMD_T_STOP\n",
+		cmd, cmd->se_tfo->get_task_tag(cmd),
+		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
+
+	wait_for_completion(&cmd->t_transport_stop_comp);
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+	pr_debug("wait_for_tasks: Stopped wait_for_completion("
+		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+		cmd->se_tfo->get_task_tag(cmd));
+
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return true;
+}
+EXPORT_SYMBOL(transport_wait_for_tasks);
+
+static int transport_get_sense_codes(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	*asc = cmd->scsi_asc;
+	*ascq = cmd->scsi_ascq;
+
+	return 0;
+}
+
+static int transport_set_sense_codes(
+	struct se_cmd *cmd,
+	u8 asc,
+	u8 ascq)
+{
+	cmd->scsi_asc = asc;
+	cmd->scsi_ascq = ascq;
+
+	return 0;
+}
+
+int transport_send_check_condition_and_sense(
+	struct se_cmd *cmd,
+	u8 reason,
+	int from_transport)
+{
+	unsigned char *buffer = cmd->sense_buffer;
+	unsigned long flags;
+	int offset;
+	u8 asc = 0, ascq = 0;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return 0;
+	}
+	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	if (!reason && from_transport)
+		goto after_reason;
+
+	if (!from_transport)
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+	/*
+	 * Data Segment and SenseLength of the fabric response PDU.
+	 *
+	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+	 * from include/scsi/scsi_cmnd.h
+	 */
+	offset = cmd->se_tfo->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+	/*
+	 * Actual SENSE DATA, see SPC-3 7.23.2.  SPC_SENSE_KEY_OFFSET uses
+	 * SENSE KEY values from include/scsi/scsi.h
+	 */
+	switch (reason) {
+	case TCM_NON_EXISTENT_LUN:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT NOT SUPPORTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
+		break;
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_SECTOR_COUNT_TOO_MANY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID COMMAND OPERATION CODE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+		break;
+	case TCM_UNKNOWN_MODE_PAGE:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* BUS DEVICE RESET FUNCTION OCCURRED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+		break;
+	case TCM_INCORRECT_AMOUNT_OF_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* NOT ENOUGH UNSOLICITED DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+		break;
+	case TCM_INVALID_CDB_FIELD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_INVALID_PARAMETER_LIST:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN PARAMETER LIST */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+		break;
+	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* UNEXPECTED_UNSOLICITED_DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+		break;
+	case TCM_SERVICE_CRC_ERROR:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* PROTOCOL SERVICE CRC ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+		/* N/A */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+		break;
+	case TCM_SNACK_REJECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* READ ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+		/* FAILED RETRANSMISSION REQUEST */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+		break;
+	case TCM_WRITE_PROTECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* DATA PROTECT */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+		/* WRITE PROTECTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+		break;
+	case TCM_ADDRESS_OUT_OF_RANGE:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+		break;
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* UNIT ATTENTION */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_CHECK_CONDITION_NOT_READY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* Not Ready */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+		transport_get_sense_codes(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	default:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT COMMUNICATION FAILURE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
+		break;
+	}
+	/*
+	 * This code uses linux/include/scsi/scsi.h SAM status codes!
+	 */
+	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	/*
+	 * Automatically padded, this value is encoded in the fabric's
+	 * data_length response PDU containing the SCSI defined sense data.
+	 */
+	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+	return cmd->se_tfo->queue_status(cmd);
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
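Worked example, assuming the conventional SPC fixed-format offsets (sense key at byte 2, additional sense length at byte 7, ASC at byte 12, ASCQ at byte 13) and a fabric whose set_fabric_sense_len() returns offset 0: TCM_NON_EXISTENT_LUN sets byte 0 to 0x70 (current error), byte 2 to ILLEGAL REQUEST (0x5), byte 7 to 10, byte 12 to 0x25 (LOGICAL UNIT NOT SUPPORTED), and the command's scsi_status to SAM_STAT_CHECK_CONDITION before the status is queued back to the fabric.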
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret = 0;
+
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		if (!send_status ||
+		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+			return 1;
+#if 0
+		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
+			" status for CDB: 0x%02x ITT: 0x%08x\n",
+			cmd->t_task_cdb[0],
+			cmd->se_tfo->get_task_tag(cmd));
+#endif
+		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+		cmd->se_tfo->queue_status(cmd);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	/*
+	 * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+	 * response.  This response with TASK_ABORTED status will be
+	 * queued back to the fabric module by transport_check_aborted_status().
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+			cmd->transport_state |= CMD_T_ABORTED;
+			smp_mb__after_atomic_inc();
+		}
+	}
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
+		cmd->se_tfo->get_task_tag(cmd));
+#endif
+	cmd->se_tfo->queue_status(cmd);
+}
+
+static int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	int ret;
+
+	switch (tmr->function) {
+	case TMR_ABORT_TASK:
+		core_tmr_abort_task(dev, tmr, cmd->se_sess);
+		break;
+	case TMR_ABORT_TASK_SET:
+	case TMR_CLEAR_ACA:
+	case TMR_CLEAR_TASK_SET:
+		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		break;
+	case TMR_LUN_RESET:
+		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+					 TMR_FUNCTION_REJECTED;
+		break;
+	case TMR_TARGET_WARM_RESET:
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case TMR_TARGET_COLD_RESET:
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	default:
+		pr_err("Unknown TMR function: 0x%02x.\n",
+				tmr->function);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	}
+
+	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+	cmd->se_tfo->queue_tm_rsp(cmd);
+
+	transport_cmd_check_stop_to_fabric(cmd);
+	return 0;
+}
+
+/*	transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+	int ret;
+	struct se_cmd *cmd;
+	struct se_device *dev = param;
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
+				atomic_read(&dev->dev_queue_obj.queue_cnt) ||
+				kthread_should_stop());
+		if (ret < 0)
+			goto out;
+
+get_cmd:
+		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+		if (!cmd)
+			continue;
+
+		switch (cmd->t_state) {
+		case TRANSPORT_NEW_CMD:
+			BUG();
+			break;
+		case TRANSPORT_NEW_CMD_MAP:
+			if (!cmd->se_tfo->new_cmd_map) {
+				pr_err("cmd->se_tfo->new_cmd_map is"
+					" NULL for TRANSPORT_NEW_CMD_MAP\n");
+				BUG();
+			}
+			ret = cmd->se_tfo->new_cmd_map(cmd);
+			if (ret < 0) {
+				transport_generic_request_failure(cmd);
+				break;
+			}
+			ret = transport_generic_new_cmd(cmd);
+			if (ret < 0) {
+				transport_generic_request_failure(cmd);
+				break;
+			}
+			break;
+		case TRANSPORT_PROCESS_WRITE:
+			transport_generic_process_write(cmd);
+			break;
+		case TRANSPORT_PROCESS_TMR:
+			transport_generic_do_tmr(cmd);
+			break;
+		case TRANSPORT_COMPLETE_QF_WP:
+			transport_write_pending_qf(cmd);
+			break;
+		case TRANSPORT_COMPLETE_QF_OK:
+			transport_complete_qf(cmd);
+			break;
+		default:
+			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
+				"i_state: %d on SE LUN: %u\n",
+				cmd->t_state,
+				cmd->se_tfo->get_task_tag(cmd),
+				cmd->se_tfo->get_cmd_state(cmd),
+				cmd->se_lun->unpacked_lun);
+			BUG();
+		}
+
+		goto get_cmd;
+	}
+
+out:
+	WARN_ON(!list_empty(&dev->state_task_list));
+	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
+	dev->process_thread = NULL;
+	return 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.c b/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.c
new file mode 100644
index 0000000..6666a0c
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.c
@@ -0,0 +1,329 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!sess)
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return 0;
+
+	deve = nacl->device_list[cmd->orig_fe_lun];
+	if (!atomic_read(&deve->ua_count))
+		return 0;
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
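A nonzero return here tells the caller that a unit attention is pending for this I_T nexus and that the CDB (other than INQUIRY, REPORT LUNS or REQUEST SENSE) should be answered with CHECK CONDITION / UNIT ATTENTION rather than executed. A sketch of that caller-side handling, with a hypothetical function name and an illustrative error code:

	static int example_check_for_ua(struct se_cmd *cmd, unsigned char *cdb)
	{
		if (core_scsi3_ua_check(cmd, cdb) < 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
			return -EBUSY;		/* illustrative return value */
		}
		return 0;
	}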
+
+int core_scsi3_ua_allocate(
+	struct se_node_acl *nacl,
+	u32 unpacked_lun,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_ua *ua, *ua_p, *ua_tmp;
+	/*
+	 * PASSTHROUGH OPS
+	 */
+	if (!nacl)
+		return -EINVAL;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!ua) {
+		pr_err("Unable to allocate struct se_ua\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&ua->ua_dev_list);
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_nacl = nacl;
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[unpacked_lun];
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice..
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			spin_unlock_irq(&nacl->device_list_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others                                    Lowest
+		 *
+		 * Each of the ASCQ codes listed above are defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+						&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for the Unit Attention condition.
+			 */
+			if ((asc == 0x29) || (ascq > ua_p->ua_asc))
+				list_add(&ua->ua_nacl_list,
+					&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else
+			list_add_tail(&ua->ua_nacl_list,
+				&deve->ua_list);
+		spin_unlock(&deve->ua_lock);
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		atomic_inc(&deve->ua_count);
+		smp_mb__after_atomic_inc();
+		return 0;
+	}
+	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n",
+		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+		asc, ascq);
+
+	atomic_inc(&deve->ua_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
+
+void core_scsi3_ua_release_all(
+	struct se_dev_entry *deve)
+{
+	struct se_ua *ua, *ua_p;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!sess)
+		return;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[cmd->orig_fe_lun];
+	if (!atomic_read(&deve->ua_count)) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+	 * sense data for the received CDB.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * For ua_intlck_ctrl code not equal to 00b, only report the
+		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
+		 * clearing it.
+		 */
+		if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			break;
+		}
+		/*
+		 * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition.  Return the ASC/ASCQ of the highest priority UA
+		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
+		 */
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	pr_debug("[%s]: %s UNIT ATTENTION condition with"
+		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!sess)
+		return -EINVAL;
+
+	nacl = sess->se_node_acl;
+	if (!nacl)
+		return -EINVAL;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = nacl->device_list[cmd->orig_fe_lun];
+	if (!atomic_read(&deve->ua_count)) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -EPERM;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
+	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+	 * matching struct se_lun.
+	 *
+	 * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+	 * struct se_lun.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
+		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+		cmd->orig_fe_lun, *asc, *ascq);
+
+	return (head) ? -EPERM : 0;
+}
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.h b/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.h
new file mode 100644
index 0000000..6e6b034
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/target_core_ua.h
@@ -0,0 +1,36 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
+#define ASCQ_29H_POWER_ON_OCCURRED				0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED				0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+						u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
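As a usage sketch for the interface above: code that has just reset a device can queue a unit attention so the next command from each registered initiator reports it. The wrapper name is hypothetical; the call and the ASC/ASCQ pair come from this header and target_core_ua.c:

	static void example_post_reset_ua(struct se_node_acl *nacl, u32 unpacked_lun)
	{
		/* ASC 0x29 / ASCQ 0x03: BUS DEVICE RESET FUNCTION OCCURRED */
		core_scsi3_ua_allocate(nacl, unpacked_lun, 0x29,
				ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
	}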
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Kconfig b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Kconfig
new file mode 100644
index 0000000..40caf45
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Kconfig
@@ -0,0 +1,5 @@
+config TCM_FC
+	tristate "TCM_FC fabric Plugin"
+	depends on LIBFC
+	help
+	Say Y here to enable the TCM FC plugin for accessing FC fabrics in TCM
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Makefile b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Makefile
new file mode 100644
index 0000000..20b14bb
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/Makefile
@@ -0,0 +1,6 @@
+tcm_fc-y +=		tfc_cmd.o \
+			tfc_conf.o \
+			tfc_io.o \
+			tfc_sess.o
+
+obj-$(CONFIG_TCM_FC)	+= tcm_fc.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tcm_fc.h b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tcm_fc.h
new file mode 100644
index 0000000..c5eb3c3
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tcm_fc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __TCM_FC_H__
+#define __TCM_FC_H__
+
+#define FT_VERSION "0.4"
+
+#define FT_NAMELEN 32		/* length of ASCII WWPNs including pad */
+#define FT_TPG_NAMELEN 32	/* max length of TPG name */
+#define FT_LUN_NAMELEN 32	/* max length of LUN name */
+
+struct ft_transport_id {
+	__u8	format;
+	__u8	__resvd1[7];
+	__u8	wwpn[8];
+	__u8	__resvd2[8];
+} __attribute__((__packed__));
+
+/*
+ * Session (remote port).
+ */
+struct ft_sess {
+	u32 port_id;			/* for hash lookup use only */
+	u32 params;
+	u16 max_frame;			/* maximum frame size */
+	u64 port_name;			/* port name for transport ID */
+	struct ft_tport *tport;
+	struct se_session *se_sess;
+	struct hlist_node hash;		/* linkage in ft_sess_hash table */
+	struct rcu_head rcu;
+	struct kref kref;		/* ref for hash and outstanding I/Os */
+};
+
+/*
+ * Hash table of sessions per local port.
+ * Hash lookup by remote port FC_ID.
+ */
+#define	FT_SESS_HASH_BITS	6
+#define	FT_SESS_HASH_SIZE	(1 << FT_SESS_HASH_BITS)
+
+/*
+ * Per local port data.
+ * This is created only after a TPG exists that allows target function
+ * for the local port.  If the TPG exists, this is allocated when
+ * we're notified that the local port has been created, or when
+ * the first PRLI provider callback is received.
+ */
+struct ft_tport {
+	struct fc_lport *lport;
+	struct ft_tpg *tpg;		/* NULL if TPG deleted before tport */
+	u32	sess_count;		/* number of sessions in hash */
+	struct rcu_head rcu;
+	struct hlist_head hash[FT_SESS_HASH_SIZE];	/* list of sessions */
+};
+
+/*
+ * Node ID and authentication.
+ */
+struct ft_node_auth {
+	u64	port_name;
+	u64	node_name;
+};
+
+/*
+ * Node ACL for FC remote port session.
+ */
+struct ft_node_acl {
+	struct ft_node_auth node_auth;
+	struct se_node_acl se_node_acl;
+};
+
+struct ft_lun {
+	u32 index;
+	char name[FT_LUN_NAMELEN];
+};
+
+/*
+ * Target portal group (local port).
+ */
+struct ft_tpg {
+	u32 index;
+	struct ft_lport_acl *lport_acl;
+	struct ft_tport *tport;		/* active tport or NULL */
+	struct list_head list;		/* linkage in ft_lport_acl tpg_list */
+	struct list_head lun_list;	/* head of LUNs */
+	struct se_portal_group se_tpg;
+	struct workqueue_struct *workqueue;
+};
+
+struct ft_lport_acl {
+	u64 wwpn;
+	char name[FT_NAMELEN];
+	struct list_head list;
+	struct list_head tpg_list;
+	struct se_wwn fc_lport_wwn;
+};
+
+/*
+ * Commands
+ */
+struct ft_cmd {
+	struct ft_sess *sess;		/* session held for cmd */
+	struct fc_seq *seq;		/* sequence in exchange mgr */
+	struct se_cmd se_cmd;		/* Local TCM I/O descriptor */
+	struct fc_frame *req_frame;
+	u32 write_data_len;		/* data received on writes */
+	struct work_struct work;
+	/* Local sense buffer */
+	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
+	u32 was_ddp_setup:1;		/* Set only if ddp is setup */
+	u32 aborted:1;			/* Set if aborted by reset or timeout */
+	struct scatterlist *sg;		/* Set only if DDP is setup */
+	u32 sg_cnt;			/* No. of items in scatterlist */
+};
+
+extern struct list_head ft_lport_list;
+extern struct mutex ft_lport_lock;
+extern struct fc4_prov ft_prov;
+extern struct target_fabric_configfs *ft_configfs;
+
+/*
+ * Fabric methods.
+ */
+
+/*
+ * Session ops.
+ */
+void ft_sess_put(struct ft_sess *);
+int ft_sess_shutdown(struct se_session *);
+void ft_sess_close(struct se_session *);
+u32 ft_sess_get_index(struct se_session *);
+u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
+
+void ft_lport_add(struct fc_lport *, void *);
+void ft_lport_del(struct fc_lport *, void *);
+int ft_lport_notify(struct notifier_block *, unsigned long, void *);
+
+/*
+ * IO methods.
+ */
+int ft_check_stop_free(struct se_cmd *);
+void ft_release_cmd(struct se_cmd *);
+int ft_queue_status(struct se_cmd *);
+int ft_queue_data_in(struct se_cmd *);
+int ft_write_pending(struct se_cmd *);
+int ft_write_pending_status(struct se_cmd *);
+u32 ft_get_task_tag(struct se_cmd *);
+int ft_get_cmd_state(struct se_cmd *);
+int ft_queue_tm_resp(struct se_cmd *);
+
+/*
+ * other internal functions.
+ */
+void ft_recv_req(struct ft_sess *, struct fc_frame *);
+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
+struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
+
+void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
+void ft_dump_cmd(struct ft_cmd *, const char *caller);
+
+ssize_t ft_format_wwn(char *, size_t, u64);
+
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
+#endif /* __TCM_FC_H__ */
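The fabric callbacks declared above receive only the embedded struct se_cmd and recover the enclosing struct ft_cmd with container_of(), as tfc_cmd.c does throughout. A minimal standalone sketch of that pattern (the structure members are trimmed stand-ins, and container_of() is re-derived locally so the example builds outside the kernel tree):

#include <stddef.h>
#include <stdio.h>

/* Same derivation the kernel macro uses: outer struct from member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd {
	int scsi_status;		/* trimmed stand-in for the real struct */
};

struct ft_cmd {
	unsigned int write_data_len;	/* fabric-private state */
	struct se_cmd se_cmd;		/* embedded core descriptor */
};

int main(void)
{
	struct ft_cmd cmd = { .write_data_len = 512 };
	struct se_cmd *se_cmd = &cmd.se_cmd;

	/* The core hands back only se_cmd; the fabric recovers its wrapper. */
	struct ft_cmd *back = container_of(se_cmd, struct ft_cmd, se_cmd);

	printf("write_data_len %u\n", back->write_data_len);
	return 0;
}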
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_cmd.c b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_cmd.c
new file mode 100644
index 0000000..da90f64
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_cmd.c
@@ -0,0 +1,564 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "tcm_fc.h"
+
+/*
+ * Dump cmd state for debugging.
+ */
+void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+	struct fc_exch *ep;
+	struct fc_seq *sp;
+	struct se_cmd *se_cmd;
+	struct scatterlist *sg;
+	int count;
+
+	se_cmd = &cmd->se_cmd;
+	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+		caller, cmd, cmd->sess, cmd->seq, se_cmd);
+
+	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+		caller, cmd, se_cmd->t_data_nents,
+	       se_cmd->data_length, se_cmd->se_cmd_flags);
+
+	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
+		pr_debug("%s: cmd %p sg %p page %p "
+			"len 0x%x off 0x%x\n",
+			caller, cmd, sg,
+			sg_page(sg), sg->length, sg->offset);
+
+	sp = cmd->seq;
+	if (sp) {
+		ep = fc_seq_exch(sp);
+		pr_debug("%s: cmd %p sid %x did %x "
+			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
+			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
+			sp->id, ep->esb_stat);
+	}
+}
+
+static void ft_free_cmd(struct ft_cmd *cmd)
+{
+	struct fc_frame *fp;
+	struct fc_lport *lport;
+
+	if (!cmd)
+		return;
+	fp = cmd->req_frame;
+	lport = fr_dev(fp);
+	if (fr_seq(fp))
+		lport->tt.seq_release(fr_seq(fp));
+	fc_frame_free(fp);
+	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
+	kfree(cmd);
+}
+
+void ft_release_cmd(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
+	ft_free_cmd(cmd);
+}
+
+int ft_check_stop_free(struct se_cmd *se_cmd)
+{
+	transport_generic_free_cmd(se_cmd, 0);
+	return 1;
+}
+
+/*
+ * Send response.
+ */
+int ft_queue_status(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp;
+	struct fcp_resp_with_ext *fcp;
+	struct fc_lport *lport;
+	struct fc_exch *ep;
+	size_t len;
+
+	if (cmd->aborted)
+		return 0;
+	ft_dump_cmd(cmd, __func__);
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
+	fp = fc_frame_alloc(lport, len);
+	if (!fp) {
+		/* XXX shouldn't just drop it - requeue and retry? */
+		return 0;
+	}
+	fcp = fc_frame_payload_get(fp, len);
+	memset(fcp, 0, len);
+	fcp->resp.fr_status = se_cmd->scsi_status;
+
+	len = se_cmd->scsi_sense_length;
+	if (len) {
+		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+		fcp->ext.fr_sns_len = htonl(len);
+		memcpy((fcp + 1), se_cmd->sense_buffer, len);
+	}
+
+	/*
+	 * Test underflow and overflow with one mask.  Usually both are off.
+	 * Bidirectional commands are not handled yet.
+	 */
+	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+			fcp->resp.fr_flags |= FCP_RESID_OVER;
+		else
+			fcp->resp.fr_flags |= FCP_RESID_UNDER;
+		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
+	}
+
+	/*
+	 * Send response.
+	 */
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
+		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
+
+	lport->tt.seq_send(lport, cmd->seq, fp);
+	lport->tt.exch_done(cmd->seq);
+	return 0;
+}
+
+int ft_write_pending_status(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
+	return cmd->write_data_len != se_cmd->data_length;
+}
+
+/*
+ * Send TX_RDY (transfer ready).
+ */
+int ft_write_pending(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp;
+	struct fcp_txrdy *txrdy;
+	struct fc_lport *lport;
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	u32 f_ctl;
+
+	ft_dump_cmd(cmd, __func__);
+
+	if (cmd->aborted)
+		return 0;
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	fp = fc_frame_alloc(lport, sizeof(*txrdy));
+	if (!fp)
+		return -ENOMEM; /* Signal QUEUE_FULL */
+
+	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
+	memset(txrdy, 0, sizeof(*txrdy));
+	txrdy->ft_burst_len = htonl(se_cmd->data_length);
+
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
+		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+	fh = fc_frame_header_get(fp);
+	f_ctl = ntoh24(fh->fh_f_ctl);
+
+	/* Only if we are the 'Exchange Responder' */
+	if (f_ctl & FC_FC_EX_CTX) {
+		/* The target is the 'exchange responder' and is sending
+		 * XFER_READY to the 'exchange initiator'.
+		 */
+		if ((ep->xid <= lport->lro_xid) &&
+		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
+			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+				/*
+				 * cmd may have been broken up into multiple
+				 * tasks. Link their sgs together so we can
+				 * operate on them all at once.
+				 */
+				transport_do_task_sg_chain(se_cmd);
+				cmd->sg = se_cmd->t_tasks_sg_chained;
+				cmd->sg_cnt =
+					se_cmd->t_tasks_sg_chained_no;
+			}
+			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
+							    cmd->sg,
+							    cmd->sg_cnt))
+				cmd->was_ddp_setup = 1;
+		}
+	}
+	lport->tt.seq_send(lport, cmd->seq, fp);
+	return 0;
+}
+
+u32 ft_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
+	if (cmd->aborted)
+		return ~0;
+	return fc_seq_exch(cmd->seq)->rxid;
+}
+
+int ft_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+/*
+ * FC sequence response handler for follow-on sequences (data) and aborts.
+ */
+static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct ft_cmd *cmd = arg;
+	struct fc_frame_header *fh;
+
+	if (unlikely(IS_ERR(fp))) {
+		/* XXX need to find cmd if queued */
+		cmd->seq = NULL;
+		cmd->aborted = true;
+		return;
+	}
+
+	fh = fc_frame_header_get(fp);
+
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_DD_SOL_DATA:	/* write data */
+		ft_recv_write_data(cmd, fp);
+		break;
+	case FC_RCTL_DD_UNSOL_CTL:	/* command */
+	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
+	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
+	default:
+		pr_debug("%s: unhandled frame r_ctl %x\n",
+		       __func__, fh->fh_r_ctl);
+		ft_invl_hw_context(cmd);
+		fc_frame_free(fp);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		break;
+	}
+}
+
+/*
+ * Send a FCP response including SCSI status and optional FCP rsp_code.
+ * status is SAM_STAT_GOOD (zero) iff code is valid.
+ * This is used in error cases, such as allocation failures.
+ */
+static void ft_send_resp_status(struct fc_lport *lport,
+				const struct fc_frame *rx_fp,
+				u32 status, enum fcp_resp_rsp_codes code)
+{
+	struct fc_frame *fp;
+	struct fc_seq *sp;
+	const struct fc_frame_header *fh;
+	size_t len;
+	struct fcp_resp_with_ext *fcp;
+	struct fcp_resp_rsp_info *info;
+
+	fh = fc_frame_header_get(rx_fp);
+	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
+		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
+	len = sizeof(*fcp);
+	if (status == SAM_STAT_GOOD)
+		len += sizeof(*info);
+	fp = fc_frame_alloc(lport, len);
+	if (!fp)
+		return;
+	fcp = fc_frame_payload_get(fp, len);
+	memset(fcp, 0, len);
+	fcp->resp.fr_status = status;
+	if (status == SAM_STAT_GOOD) {
+		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
+		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
+		info = (struct fcp_resp_rsp_info *)(fcp + 1);
+		info->rsp_code = code;
+	}
+
+	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
+	sp = fr_seq(fp);
+	if (sp) {
+		lport->tt.seq_send(lport, sp, fp);
+		lport->tt.exch_done(sp);
+	} else {
+		lport->tt.frame_send(lport, fp);
+	}
+}
+
+/*
+ * Send error or task management response.
+ */
+static void ft_send_resp_code(struct ft_cmd *cmd,
+			      enum fcp_resp_rsp_codes code)
+{
+	ft_send_resp_status(cmd->sess->tport->lport,
+			    cmd->req_frame, SAM_STAT_GOOD, code);
+}
+
+
+/*
+ * Send error or task management response.
+ * Always frees the cmd and associated state.
+ */
+static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
+				      enum fcp_resp_rsp_codes code)
+{
+	ft_send_resp_code(cmd, code);
+	ft_free_cmd(cmd);
+}
+
+/*
+ * Handle Task Management Request.
+ */
+static void ft_send_tm(struct ft_cmd *cmd)
+{
+	struct fcp_cmnd *fcp;
+	int rc;
+	u8 tm_func;
+
+	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
+
+	switch (fcp->fc_tm_flags) {
+	case FCP_TMF_LUN_RESET:
+		tm_func = TMR_LUN_RESET;
+		break;
+	case FCP_TMF_TGT_RESET:
+		tm_func = TMR_TARGET_WARM_RESET;
+		break;
+	case FCP_TMF_CLR_TASK_SET:
+		tm_func = TMR_CLEAR_TASK_SET;
+		break;
+	case FCP_TMF_ABT_TASK_SET:
+		tm_func = TMR_ABORT_TASK_SET;
+		break;
+	case FCP_TMF_CLR_ACA:
+		tm_func = TMR_CLEAR_ACA;
+		break;
+	default:
+		/*
+		 * FCP4r01 indicates having a combination of
+		 * tm_flags set is invalid.
+		 */
+		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
+		return;
+	}
+
+	/* FIXME: Add referenced task tag for ABORT_TASK */
+	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
+		&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+		cmd, tm_func, GFP_KERNEL, 0, 0);
+	if (rc < 0)
+		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
+}
+
+/*
+ * Send status from completed task management request.
+ */
+int ft_queue_tm_resp(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
+	enum fcp_resp_rsp_codes code;
+
+	if (cmd->aborted)
+		return 0;
+	switch (tmr->response) {
+	case TMR_FUNCTION_COMPLETE:
+		code = FCP_TMF_CMPL;
+		break;
+	case TMR_LUN_DOES_NOT_EXIST:
+		code = FCP_TMF_INVALID_LUN;
+		break;
+	case TMR_FUNCTION_REJECTED:
+		code = FCP_TMF_REJECTED;
+		break;
+	case TMR_TASK_DOES_NOT_EXIST:
+	case TMR_TASK_STILL_ALLEGIANT:
+	case TMR_TASK_FAILOVER_NOT_SUPPORTED:
+	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+	case TMR_FUNCTION_AUTHORIZATION_FAILED:
+	default:
+		code = FCP_TMF_FAILED;
+		break;
+	}
+	pr_debug("tmr fn %d resp %d fcp code %d\n",
+		  tmr->function, tmr->response, code);
+	ft_send_resp_code(cmd, code);
+	return 0;
+}
+
+static void ft_send_work(struct work_struct *work);
+
+/*
+ * Handle incoming FCP command.
+ */
+static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
+{
+	struct ft_cmd *cmd;
+	struct fc_lport *lport = sess->tport->lport;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+	if (!cmd)
+		goto busy;
+	cmd->sess = sess;
+	cmd->seq = lport->tt.seq_assign(lport, fp);
+	if (!cmd->seq) {
+		kfree(cmd);
+		goto busy;
+	}
+	cmd->req_frame = fp;		/* hold frame during cmd */
+
+	INIT_WORK(&cmd->work, ft_send_work);
+	queue_work(sess->tport->tpg->workqueue, &cmd->work);
+	return;
+
+busy:
+	pr_debug("cmd or seq allocation failure - sending BUSY\n");
+	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
+	fc_frame_free(fp);
+	ft_sess_put(sess);		/* undo get from lookup */
+}
+
+
+/*
+ * Handle incoming FCP frame.
+ * Caller has verified that the frame is type FCP.
+ */
+void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_DD_UNSOL_CMD:	/* command */
+		ft_recv_cmd(sess, fp);
+		break;
+	case FC_RCTL_DD_SOL_DATA:	/* write data */
+	case FC_RCTL_DD_UNSOL_CTL:
+	case FC_RCTL_DD_SOL_CTL:
+	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
+	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
+	default:
+		pr_debug("%s: unhandled frame r_ctl %x\n",
+		       __func__, fh->fh_r_ctl);
+		fc_frame_free(fp);
+		ft_sess_put(sess);	/* undo get from lookup */
+		break;
+	}
+}
+
+/*
+ * Send new command to target.
+ */
+static void ft_send_work(struct work_struct *work)
+{
+	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
+	struct fcp_cmnd *fcp;
+	int data_dir = 0;
+	int task_attr;
+
+	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
+	if (!fcp)
+		goto err;
+
+	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
+		goto err;		/* not handling longer CDBs yet */
+
+	/*
+	 * Check for FCP task management flags
+	 */
+	if (fcp->fc_tm_flags) {
+		ft_send_tm(cmd);
+		return;
+	}
+
+	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
+	case 0:
+		data_dir = DMA_NONE;
+		break;
+	case FCP_CFL_RDDATA:
+		data_dir = DMA_FROM_DEVICE;
+		break;
+	case FCP_CFL_WRDATA:
+		data_dir = DMA_TO_DEVICE;
+		break;
+	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
+		goto err;	/* TBD not supported by tcm_fc yet */
+	}
+	/*
+	 * Locate the SAM Task Attr from fc_pri_ta
+	 */
+	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
+	case FCP_PTA_HEADQ:
+		task_attr = MSG_HEAD_TAG;
+		break;
+	case FCP_PTA_ORDERED:
+		task_attr = MSG_ORDERED_TAG;
+		break;
+	case FCP_PTA_ACA:
+		task_attr = MSG_ACA_TAG;
+		break;
+	case FCP_PTA_SIMPLE: /* Fallthrough */
+	default:
+		task_attr = MSG_SIMPLE_TAG;
+	}
+
+	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+	/*
+	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+	 * directly from ft_check_stop_free callback in response path.
+	 */
+	target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
+			&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+			ntohl(fcp->fc_dl), task_attr, data_dir, 0);
+	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+	return;
+
+err:
+	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
+}
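ft_send_work() above maps the FCP_CFL_RDDATA/FCP_CFL_WRDATA bits to a DMA direction and rejects bidirectional commands. A standalone sketch of that decode (the flag values follow the FCP definitions used in this code; the enum is a local stand-in for the kernel's dma_data_direction):

#include <stdio.h>

#define FCP_CFL_WRDATA	0x01	/* initiator will write data to the target */
#define FCP_CFL_RDDATA	0x02	/* initiator will read data from the target */

enum xfer_dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE, DIR_INVALID };

/* Decode the FCP_CMND data-direction bits the way ft_send_work() does. */
static enum xfer_dir fcp_data_dir(unsigned char fc_flags)
{
	switch (fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		return DIR_NONE;		/* no data phase */
	case FCP_CFL_RDDATA:
		return DIR_FROM_DEVICE;		/* read: target -> initiator */
	case FCP_CFL_WRDATA:
		return DIR_TO_DEVICE;		/* write: initiator -> target */
	default:
		return DIR_INVALID;		/* bidirectional: rejected */
	}
}

int main(void)
{
	printf("read  -> %d\n", fcp_data_dir(FCP_CFL_RDDATA));
	printf("write -> %d\n", fcp_data_dir(FCP_CFL_WRDATA));
	printf("both  -> %d\n", fcp_data_dir(FCP_CFL_RDDATA | FCP_CFL_WRDATA));
	return 0;
}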
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_conf.c b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_conf.c
new file mode 100644
index 0000000..2948dc9
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_conf.c
@@ -0,0 +1,651 @@
+/*******************************************************************************
+ * Filename:  tfc_conf.c
+ *
+ * This file contains the configfs implementation for TCM_fc fabric node.
+ * Based on tcm_loop_configfs.c
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ * Copyright (c) 2009,2010 Rising Tide, Inc.
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libfc.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "tcm_fc.h"
+
+struct target_fabric_configfs *ft_configfs;
+
+LIST_HEAD(ft_lport_list);
+DEFINE_MUTEX(ft_lport_lock);
+
+unsigned int ft_debug_logging;
+module_param_named(debug_logging, ft_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by ft_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+	const char *cp;
+	char c;
+	u32 byte = 0;
+	u32 pos = 0;
+	u32 err;
+	int val;
+
+	*wwn = 0;
+	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
+		c = *cp;
+		if (c == '\n' && cp[1] == '\0')
+			continue;
+		if (strict && pos++ == 2 && byte++ < 7) {
+			pos = 0;
+			if (c == ':')
+				continue;
+			err = 1;
+			goto fail;
+		}
+		if (c == '\0') {
+			err = 2;
+			if (strict && byte != 8)
+				goto fail;
+			return cp - name;
+		}
+		err = 3;
+		val = hex_to_bin(c);
+		if (val < 0 || (strict && isupper(c)))
+			goto fail;
+		*wwn = (*wwn << 4) | val;
+	}
+	err = 4;
+fail:
+	pr_debug("err %u len %zu pos %u byte %u\n",
+		    err, cp - name, pos, byte);
+	return -1;
+}
+
+ssize_t ft_format_wwn(char *buf, size_t len, u64 wwn)
+{
+	u8 b[8];
+
+	put_unaligned_be64(wwn, b);
+	return snprintf(buf, len,
+		 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+		 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static ssize_t ft_wwn_show(void *arg, char *buf)
+{
+	u64 *wwn = arg;
+	ssize_t len;
+
+	len = ft_format_wwn(buf, PAGE_SIZE - 2, *wwn);
+	buf[len++] = '\n';
+	return len;
+}
+
+static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
+{
+	ssize_t ret;
+	u64 wwn;
+
+	ret = ft_parse_wwn(buf, &wwn, 0);
+	if (ret > 0)
+		*(u64 *)arg = wwn;
+	return ret;
+}
+
+/*
+ * ACL auth ops.
+ */
+
+static ssize_t ft_nacl_show_port_name(
+	struct se_node_acl *se_nacl,
+	char *page)
+{
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_show(&acl->node_auth.port_name, page);
+}
+
+static ssize_t ft_nacl_store_port_name(
+	struct se_node_acl *se_nacl,
+	const char *page,
+	size_t count)
+{
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_store(&acl->node_auth.port_name, page, count);
+}
+
+TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
+
+static ssize_t ft_nacl_show_node_name(
+	struct se_node_acl *se_nacl,
+	char *page)
+{
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_show(&acl->node_auth.node_name, page);
+}
+
+static ssize_t ft_nacl_store_node_name(
+	struct se_node_acl *se_nacl,
+	const char *page,
+	size_t count)
+{
+	struct ft_node_acl *acl = container_of(se_nacl,
+			struct ft_node_acl, se_node_acl);
+
+	return ft_wwn_store(&acl->node_auth.node_name, page, count);
+}
+
+TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *ft_nacl_base_attrs[] = {
+	&ft_nacl_port_name.attr,
+	&ft_nacl_node_name.attr,
+	NULL,
+};
+
+/*
+ * ACL ops.
+ */
+
+/*
+ * Add ACL for an initiator.  The ACL is named arbitrarily.
+ * The port_name and/or node_name are attributes.
+ */
+static struct se_node_acl *ft_add_acl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct ft_node_acl *acl;
+	struct ft_tpg *tpg;
+	u64 wwpn;
+	u32 q_depth;
+
+	pr_debug("add acl %s\n", name);
+	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+
+	if (ft_parse_wwn(name, &wwpn, 1) < 0)
+		return ERR_PTR(-EINVAL);
+
+	acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
+	if (!acl)
+		return ERR_PTR(-ENOMEM);
+	acl->node_auth.port_name = wwpn;
+
+	q_depth = 32;		/* XXX bogus default - get from tpg? */
+	return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
+				&acl->se_node_acl, name, q_depth);
+}
+
+static void ft_del_acl(struct se_node_acl *se_acl)
+{
+	struct se_portal_group *se_tpg = se_acl->se_tpg;
+	struct ft_tpg *tpg;
+	struct ft_node_acl *acl = container_of(se_acl,
+				struct ft_node_acl, se_node_acl);
+
+	pr_debug("del acl %s\n",
+		config_item_name(&se_acl->acl_group.cg_item));
+
+	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+	pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
+		    acl, se_acl, tpg, &tpg->se_tpg);
+
+	core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
+	kfree(acl);
+}
+
+struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
+{
+	struct ft_node_acl *found = NULL;
+	struct ft_node_acl *acl;
+	struct se_portal_group *se_tpg = &tpg->se_tpg;
+	struct se_node_acl *se_acl;
+
+	spin_lock_irq(&se_tpg->acl_node_lock);
+	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
+		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
+		pr_debug("acl %p port_name %llx\n",
+			acl, (unsigned long long)acl->node_auth.port_name);
+		if (acl->node_auth.port_name == rdata->ids.port_name ||
+		    acl->node_auth.node_name == rdata->ids.node_name) {
+			pr_debug("acl %p port_name %llx matched\n", acl,
+				    (unsigned long long)rdata->ids.port_name);
+			found = acl;
+			/* XXX need to hold onto ACL */
+			break;
+		}
+	}
+	spin_unlock_irq(&se_tpg->acl_node_lock);
+	return found;
+}
+
+struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+	struct ft_node_acl *acl;
+
+	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+	if (!acl) {
+		pr_err("Unable to allocate struct ft_node_acl\n");
+		return NULL;
+	}
+	pr_debug("acl %p\n", acl);
+	return &acl->se_node_acl;
+}
+
+static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
+				      struct se_node_acl *se_acl)
+{
+	struct ft_node_acl *acl = container_of(se_acl,
+				struct ft_node_acl, se_node_acl);
+
+	pr_debug("acl %p\n", acl);
+	kfree(acl);
+}
+
+/*
+ * local_port port_group (tpg) ops.
+ */
+static struct se_portal_group *ft_add_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct ft_lport_acl *lacl;
+	struct ft_tpg *tpg;
+	struct workqueue_struct *wq;
+	unsigned long index;
+	int ret;
+
+	pr_debug("tcm_fc: add tpg %s\n", name);
+
+	/*
+	 * Name must be "tpgt_" followed by the index.
+	 */
+	if (strstr(name, "tpgt_") != name)
+		return NULL;
+	if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
+		return NULL;
+
+	lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+	if (!tpg)
+		return NULL;
+	tpg->index = index;
+	tpg->lport_acl = lacl;
+	INIT_LIST_HEAD(&tpg->lun_list);
+
+	wq = alloc_workqueue("tcm_fc", 0, 1);
+	if (!wq) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+				tpg, TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		destroy_workqueue(wq);
+		kfree(tpg);
+		return NULL;
+	}
+	tpg->workqueue = wq;
+
+	mutex_lock(&ft_lport_lock);
+	list_add_tail(&tpg->list, &lacl->tpg_list);
+	mutex_unlock(&ft_lport_lock);
+
+	return &tpg->se_tpg;
+}
+
+static void ft_del_tpg(struct se_portal_group *se_tpg)
+{
+	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+
+	pr_debug("del tpg %s\n",
+		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
+
+	destroy_workqueue(tpg->workqueue);
+
+	/* Wait for sessions to be freed thru RCU, for BUG_ON below */
+	synchronize_rcu();
+
+	mutex_lock(&ft_lport_lock);
+	list_del(&tpg->list);
+	if (tpg->tport) {
+		tpg->tport->tpg = NULL;
+		tpg->tport = NULL;
+	}
+	mutex_unlock(&ft_lport_lock);
+
+	core_tpg_deregister(se_tpg);
+	kfree(tpg);
+}
+
+/*
+ * Verify that an lport is configured to use the tcm_fc module, and return
+ * the target port group that should be used.
+ *
+ * The caller holds ft_lport_lock.
+ */
+struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
+{
+	struct ft_lport_acl *lacl;
+	struct ft_tpg *tpg;
+
+	list_for_each_entry(lacl, &ft_lport_list, list) {
+		if (lacl->wwpn == lport->wwpn) {
+			list_for_each_entry(tpg, &lacl->tpg_list, list)
+				return tpg; /* XXX for now return first entry */
+			return NULL;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * target config instance ops.
+ */
+
+/*
+ * Add lport to allowed config.
+ * The name is the WWPN in lower-case ASCII, colon-separated bytes.
+ */
+static struct se_wwn *ft_add_lport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct ft_lport_acl *lacl;
+	struct ft_lport_acl *old_lacl;
+	u64 wwpn;
+
+	pr_debug("add lport %s\n", name);
+	if (ft_parse_wwn(name, &wwpn, 1) < 0)
+		return NULL;
+	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
+	if (!lacl)
+		return NULL;
+	lacl->wwpn = wwpn;
+	INIT_LIST_HEAD(&lacl->tpg_list);
+
+	mutex_lock(&ft_lport_lock);
+	list_for_each_entry(old_lacl, &ft_lport_list, list) {
+		if (old_lacl->wwpn == wwpn) {
+			mutex_unlock(&ft_lport_lock);
+			kfree(lacl);
+			return NULL;
+		}
+	}
+	list_add_tail(&lacl->list, &ft_lport_list);
+	ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+	mutex_unlock(&ft_lport_lock);
+
+	return &lacl->fc_lport_wwn;
+}
+
+static void ft_del_lport(struct se_wwn *wwn)
+{
+	struct ft_lport_acl *lacl = container_of(wwn,
+				struct ft_lport_acl, fc_lport_wwn);
+
+	pr_debug("del lport %s\n", lacl->name);
+	mutex_lock(&ft_lport_lock);
+	list_del(&lacl->list);
+	mutex_unlock(&ft_lport_lock);
+
+	kfree(lacl);
+}
+
+static ssize_t ft_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
+		""UTS_RELEASE"\n",  utsname()->sysname, utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(ft, version);
+
+static struct configfs_attribute *ft_wwn_attrs[] = {
+	&ft_wwn_version.attr,
+	NULL,
+};
+
+static char *ft_get_fabric_name(void)
+{
+	return "fc";
+}
+
+static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return tpg->lport_acl->name;
+}
+
+static u16 ft_get_tag(struct se_portal_group *se_tpg)
+{
+	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	/*
+	 * This tag is used when forming the SCSI Name identifier in the EVPD
+	 * 0x83 (device identification) page to represent the SCSI Target Port.
+	 */
+	return tpg->index;
+}
+
+static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int ft_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
+{
+}
+
+static u16 ft_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static u16 ft_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_len)
+{
+	return 0;
+}
+
+static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
+
+	return tpg->index;
+}
+
+static struct target_core_fabric_ops ft_fabric_ops = {
+	.get_fabric_name =		ft_get_fabric_name,
+	.get_fabric_proto_ident =	fc_get_fabric_proto_ident,
+	.tpg_get_wwn =			ft_get_fabric_wwn,
+	.tpg_get_tag =			ft_get_tag,
+	.tpg_get_default_depth =	ft_get_default_depth,
+	.tpg_get_pr_transport_id =	fc_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len =	fc_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
+	.tpg_check_demo_mode =		ft_check_false,
+	.tpg_check_demo_mode_cache =	ft_check_false,
+	.tpg_check_demo_mode_write_protect = ft_check_false,
+	.tpg_check_prod_mode_write_protect = ft_check_false,
+	.tpg_alloc_fabric_acl =		ft_tpg_alloc_fabric_acl,
+	.tpg_release_fabric_acl =	ft_tpg_release_fabric_acl,
+	.tpg_get_inst_index =		ft_tpg_get_inst_index,
+	.check_stop_free =		ft_check_stop_free,
+	.release_cmd =			ft_release_cmd,
+	.shutdown_session =		ft_sess_shutdown,
+	.close_session =		ft_sess_close,
+	.sess_get_index =		ft_sess_get_index,
+	.sess_get_initiator_sid =	NULL,
+	.write_pending =		ft_write_pending,
+	.write_pending_status =		ft_write_pending_status,
+	.set_default_node_attributes =	ft_set_default_node_attr,
+	.get_task_tag =			ft_get_task_tag,
+	.get_cmd_state =		ft_get_cmd_state,
+	.queue_data_in =		ft_queue_data_in,
+	.queue_status =			ft_queue_status,
+	.queue_tm_rsp =			ft_queue_tm_resp,
+	.get_fabric_sense_len =		ft_get_fabric_sense_len,
+	.set_fabric_sense_len =		ft_set_fabric_sense_len,
+	/*
+	 * Setup function pointers for generic logic in
+	 * target_core_fabric_configfs.c
+	 */
+	.fabric_make_wwn =		&ft_add_lport,
+	.fabric_drop_wwn =		&ft_del_lport,
+	.fabric_make_tpg =		&ft_add_tpg,
+	.fabric_drop_tpg =		&ft_del_tpg,
+	.fabric_post_link =		NULL,
+	.fabric_pre_unlink =		NULL,
+	.fabric_make_np =		NULL,
+	.fabric_drop_np =		NULL,
+	.fabric_make_nodeacl =		&ft_add_acl,
+	.fabric_drop_nodeacl =		&ft_del_acl,
+};
+
+int ft_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	/*
+	 * Register the top level struct config_item_type with TCM core
+	 */
+	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
+	if (IS_ERR(fabric)) {
+		pr_err("%s: target_fabric_configfs_init() failed!\n",
+		       __func__);
+		return PTR_ERR(fabric);
+	}
+	fabric->tf_ops = ft_fabric_ops;
+
+	/* Allowing support for task_sg_chaining */
+	fabric->tf_ops.task_sg_chaining = 1;
+
+	/*
+	 * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 */
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
+						    ft_nacl_base_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	/*
+	 * register the fabric for use within TCM
+	 */
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		pr_debug("target_fabric_configfs_register() for"
+			    " FC Target failed!\n");
+		target_fabric_configfs_free(fabric);
+		return -1;
+	}
+
+	/*
+	 * Setup our local pointer to *fabric.
+	 */
+	ft_configfs = fabric;
+	return 0;
+}
+
+void ft_deregister_configfs(void)
+{
+	if (!ft_configfs)
+		return;
+	target_fabric_configfs_deregister(ft_configfs);
+	ft_configfs = NULL;
+}
+
+static struct notifier_block ft_notifier = {
+	.notifier_call = ft_lport_notify
+};
+
+static int __init ft_init(void)
+{
+	if (ft_register_configfs())
+		return -1;
+	if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
+		ft_deregister_configfs();
+		return -1;
+	}
+	blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
+	fc_lport_iterate(ft_lport_add, NULL);
+	return 0;
+}
+
+static void __exit ft_exit(void)
+{
+	blocking_notifier_chain_unregister(&fc_lport_notifier_head,
+					   &ft_notifier);
+	fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
+	fc_lport_iterate(ft_lport_del, NULL);
+	ft_deregister_configfs();
+	synchronize_rcu();
+}
+
+MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
+MODULE_LICENSE("GPL");
+module_init(ft_init);
+module_exit(ft_exit);
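ft_add_lport() and ft_add_acl() only accept names that parse strictly as the lower-case, colon-separated form produced by ft_format_wwn(). A standalone round-trip sketch of that text format (the sscanf()-based parser is a simplified stand-in for the strict ft_parse_wwn() above, and the example WWPN is arbitrary):

#include <stdio.h>
#include <inttypes.h>

/* Print a WWN in the same "xx:xx:...:xx" lower-case form as ft_format_wwn(). */
static int format_wwn(char *buf, size_t len, uint64_t wwn)
{
	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		(unsigned int)(wwn >> 56) & 0xff, (unsigned int)(wwn >> 48) & 0xff,
		(unsigned int)(wwn >> 40) & 0xff, (unsigned int)(wwn >> 32) & 0xff,
		(unsigned int)(wwn >> 24) & 0xff, (unsigned int)(wwn >> 16) & 0xff,
		(unsigned int)(wwn >> 8) & 0xff, (unsigned int)wwn & 0xff);
}

/* Simplified parser: accepts exactly eight colon-separated hex bytes. */
static int parse_wwn(const char *s, uint64_t *wwn)
{
	unsigned int b[8];
	int i;

	if (sscanf(s, "%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x",
		   &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6], &b[7]) != 8)
		return -1;
	*wwn = 0;
	for (i = 0; i < 8; i++)
		*wwn = (*wwn << 8) | b[i];
	return 0;
}

int main(void)
{
	char buf[24];
	uint64_t wwn = 0x2000001b32aabbccULL;
	uint64_t back = 0;

	format_wwn(buf, sizeof(buf), wwn);
	parse_wwn(buf, &back);
	printf("%s %s\n", buf, wwn == back ? "(round trip ok)" : "(mismatch)");
	return 0;
}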
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_io.c b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_io.c
new file mode 100644
index 0000000..8d5cf53
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_io.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
+ *
+ * Copyright (c) 2007 Intel Corporation. All rights reserved.
+ * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2009 Rising Tide, Inc.
+ * Copyright (c) 2009 Linux-iSCSI.org
+ * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/ratelimit.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "tcm_fc.h"
+
+/*
+ * Deliver read data back to initiator.
+ * XXX TBD handle resource problems later.
+ */
+int ft_queue_data_in(struct se_cmd *se_cmd)
+{
+	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+	struct fc_frame *fp = NULL;
+	struct fc_exch *ep;
+	struct fc_lport *lport;
+	struct scatterlist *sg = NULL;
+	size_t remaining;
+	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
+	u32 mem_off = 0;
+	u32 fh_off = 0;
+	u32 frame_off = 0;
+	size_t frame_len = 0;
+	size_t mem_len = 0;
+	size_t tlen;
+	size_t off_in_page;
+	struct page *page = NULL;
+	int use_sg;
+	int error;
+	void *page_addr;
+	void *from;
+	void *to = NULL;
+
+	if (cmd->aborted)
+		return 0;
+	ep = fc_seq_exch(cmd->seq);
+	lport = ep->lp;
+	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+
+	remaining = se_cmd->data_length;
+
+	/*
+	 * Setup to use first mem list entry, unless no data.
+	 */
+	BUG_ON(remaining && !se_cmd->t_data_sg);
+	if (remaining) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
+	}
+
+	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
+	use_sg = !(remaining % 4);
+
+	while (remaining) {
+		if (!mem_len) {
+			sg = sg_next(sg);
+			mem_len = min((size_t)sg->length, remaining);
+			mem_off = sg->offset;
+			page = sg_page(sg);
+		}
+		if (!frame_len) {
+			/*
+			 * If the lport has Large Send Offload (LSO), allow
+			 * 'frame_len' to be as big as 'lso_max'; otherwise
+			 * limit it to the session's max frame size.
+			 */
+			frame_len = (lport->seq_offload) ? lport->lso_max :
+							  cmd->sess->max_frame;
+			frame_len = min(frame_len, remaining);
+			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
+			if (!fp)
+				return -ENOMEM;
+			to = fc_frame_payload_get(fp, 0);
+			fh_off = frame_off;
+			frame_off += frame_len;
+			/*
+			 * Set the frame's max payload, which the base driver
+			 * uses to tell the HW the maximum frame size so the
+			 * HW can fragment appropriately based on the
+			 * underlying netdev's "gso_max_size".
+			 */
+			fr_max_payload(fp) = cmd->sess->max_frame;
+		}
+		tlen = min(mem_len, frame_len);
+
+		if (use_sg) {
+			off_in_page = mem_off;
+			BUG_ON(!page);
+			get_page(page);
+			skb_fill_page_desc(fp_skb(fp),
+					   skb_shinfo(fp_skb(fp))->nr_frags,
+					   page, off_in_page, tlen);
+			fr_len(fp) += tlen;
+			fp_skb(fp)->data_len += tlen;
+			fp_skb(fp)->truesize +=
+					PAGE_SIZE << compound_order(page);
+		} else {
+			BUG_ON(!page);
+			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
+			page_addr = from;
+			from += mem_off & ~PAGE_MASK;
+			tlen = min(tlen, (size_t)(PAGE_SIZE -
+						(mem_off & ~PAGE_MASK)));
+			memcpy(to, from, tlen);
+			kunmap_atomic(page_addr);
+			to += tlen;
+		}
+
+		mem_off += tlen;
+		mem_len -= tlen;
+		frame_len -= tlen;
+		remaining -= tlen;
+
+		if (frame_len &&
+		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
+			continue;
+		if (!remaining)
+			f_ctl |= FC_FC_END_SEQ;
+		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+			       FC_TYPE_FCP, f_ctl, fh_off);
+		error = lport->tt.seq_send(lport, cmd->seq, fp);
+		if (error) {
+			/* XXX For now, initiator will retry */
+			pr_err_ratelimited("%s: Failed to send frame %p, "
+						"xid <0x%x>, remaining %zu, "
+						"lso_max <0x%x>\n",
+						__func__, fp, ep->xid,
+						remaining, lport->lso_max);
+		}
+	}
+	return ft_queue_status(se_cmd);
+}
+
+/*
+ * Receive write data frame.
+ */
+void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct fc_seq *seq = cmd->seq;
+	struct fc_exch *ep;
+	struct fc_lport *lport;
+	struct fc_frame_header *fh;
+	struct scatterlist *sg = NULL;
+	u32 mem_off = 0;
+	u32 rel_off;
+	size_t frame_len;
+	size_t mem_len = 0;
+	size_t tlen;
+	struct page *page = NULL;
+	void *page_addr;
+	void *from;
+	void *to;
+	u32 f_ctl;
+	void *buf;
+
+	fh = fc_frame_header_get(fp);
+	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
+		goto drop;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	ep = fc_seq_exch(seq);
+	lport = ep->lp;
+	if (cmd->was_ddp_setup) {
+		BUG_ON(!ep);
+		BUG_ON(!lport);
+		/*
+		 * Since DDP (Large Rx offload) was setup for this request,
+		 * payload is expected to be copied directly to user buffers.
+		 */
+		buf = fc_frame_payload_get(fp, 1);
+		if (buf)
+			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+				"cmd->sg_cnt 0x%x. DDP was setup"
+				" hence not expected to receive frame with "
+				"payload, Frame will be dropped if"
+				"'Sequence Initiative' bit in f_ctl is"
+				"not set\n", __func__, ep->xid, f_ctl,
+				cmd->sg, cmd->sg_cnt);
+		/*
+		 * Invalidate the HW DDP context if one was set up for this
+		 * command.  Invalidation is required in both the success
+		 * and the error case.
+		 */
+		ft_invl_hw_context(cmd);
+
+		/*
+		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+		 * write data frame is received successfully where payload is
+		 * posted directly to user buffer and only the last frame's
+		 * header is posted in receive queue.
+		 *
+		 * If "Sequence Initiative (TSI)" bit is not set, means error
+		 * condition w.r.t. DDP, hence drop the packet and let explict
+		 * ABORTS from other end of exchange timer trigger the recovery.
+		 */
+		if (f_ctl & FC_FC_SEQ_INIT)
+			goto last_frame;
+		else
+			goto drop;
+	}
+
+	rel_off = ntohl(fh->fh_parm_offset);
+	frame_len = fr_len(fp);
+	if (frame_len <= sizeof(*fh))
+		goto drop;
+	frame_len -= sizeof(*fh);
+	from = fc_frame_payload_get(fp, 0);
+	if (rel_off >= se_cmd->data_length)
+		goto drop;
+	if (frame_len + rel_off > se_cmd->data_length)
+		frame_len = se_cmd->data_length - rel_off;
+
+	/*
+	 * Setup to use first mem list entry, unless no data.
+	 */
+	BUG_ON(frame_len && !se_cmd->t_data_sg);
+	if (frame_len) {
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
+	}
+
+	while (frame_len) {
+		if (!mem_len) {
+			sg = sg_next(sg);
+			mem_len = sg->length;
+			mem_off = sg->offset;
+			page = sg_page(sg);
+		}
+		if (rel_off >= mem_len) {
+			rel_off -= mem_len;
+			mem_len = 0;
+			continue;
+		}
+		mem_off += rel_off;
+		mem_len -= rel_off;
+		rel_off = 0;
+
+		tlen = min(mem_len, frame_len);
+
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
+		page_addr = to;
+		to += mem_off & ~PAGE_MASK;
+		tlen = min(tlen, (size_t)(PAGE_SIZE -
+					  (mem_off & ~PAGE_MASK)));
+		memcpy(to, from, tlen);
+		kunmap_atomic(page_addr);
+
+		from += tlen;
+		frame_len -= tlen;
+		mem_off += tlen;
+		mem_len -= tlen;
+		cmd->write_data_len += tlen;
+	}
+last_frame:
+	if (cmd->write_data_len == se_cmd->data_length)
+		transport_generic_handle_data(se_cmd);
+drop:
+	fc_frame_free(fp);
+}
+
+/*
+ * Clean up any HW-specific resources when ABORTs, errors,
+ * or timeouts are received.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+	struct fc_seq *seq = cmd->seq;
+	struct fc_exch *ep = NULL;
+	struct fc_lport *lport = NULL;
+
+	BUG_ON(!cmd);
+
+	/* Cleanup the DDP context in HW if DDP was setup */
+	if (cmd->was_ddp_setup && seq) {
+		ep = fc_seq_exch(seq);
+		if (ep) {
+			lport = ep->lp;
+			if (lport && (ep->xid <= lport->lro_xid)) {
+				/*
+				 * "ddp_done" trigger invalidation of HW
+				 * specific DDP context
+				 */
+				cmd->write_data_len = lport->tt.ddp_done(lport,
+								      ep->xid);
+
+				/*
+				 * Clear the flag to record that the HW DDP
+				 * context has been invalidated, so the same
+				 * context (identified by ep->xid) is not
+				 * invalidated again.
+				 */
+				cmd->was_ddp_setup = 0;
+			}
+		}
+	}
+}
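ft_queue_data_in() and ft_recv_write_data() above walk the command's scatterlist and clamp each copy to the smallest of: the bytes left in the current sg entry, the bytes left in the current frame, and, on the kmap/memcpy path, the bytes left before the next page boundary. A standalone sketch of that clamp (PAGE_SIZE and PAGE_MASK are local constants here, not the kernel's):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL			/* local stand-in */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

/*
 * One iteration of the copy loop: clamp to the current sg entry, the
 * current frame, and the remainder of the current page.
 */
static size_t copy_chunk(size_t mem_off, size_t mem_len, size_t frame_len)
{
	size_t tlen = min_sz(mem_len, frame_len);

	return min_sz(tlen, PAGE_SIZE - (mem_off & ~PAGE_MASK));
}

int main(void)
{
	/* 100 bytes into a page, 8000 left in the sg entry, 2048 in the frame */
	printf("chunk %zu\n", copy_chunk(100, 8000, 2048));	/* 2048 */
	/* near a page boundary the page remainder wins */
	printf("chunk %zu\n", copy_chunk(4000, 8000, 2048));	/* 96 */
	return 0;
}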
diff --git a/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_sess.c b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_sess.c
new file mode 100644
index 0000000..5ac8d56
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/target/tcm_fc/tfc_sess.c
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* XXX TBD some includes may be extraneous */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/kref.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libfc.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "tcm_fc.h"
+
+static void ft_sess_delete_all(struct ft_tport *);
+
+/*
+ * Lookup or allocate target local port.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+{
+	struct ft_tpg *tpg;
+	struct ft_tport *tport;
+	int i;
+
+	tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+					  lockdep_is_held(&ft_lport_lock));
+	if (tport && tport->tpg)
+		return tport;
+
+	tpg = ft_lport_find_tpg(lport);
+	if (!tpg)
+		return NULL;
+
+	if (tport) {
+		tport->tpg = tpg;
+		tpg->tport = tport;
+		return tport;
+	}
+
+	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+	if (!tport)
+		return NULL;
+
+	tport->lport = lport;
+	tport->tpg = tpg;
+	tpg->tport = tport;
+	for (i = 0; i < FT_SESS_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&tport->hash[i]);
+
+	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], tport);
+	return tport;
+}
+
+/*
+ * Delete a target local port.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_tport_delete(struct ft_tport *tport)
+{
+	struct fc_lport *lport;
+	struct ft_tpg *tpg;
+
+	ft_sess_delete_all(tport);
+	lport = tport->lport;
+	BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
+	rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
+
+	tpg = tport->tpg;
+	if (tpg) {
+		tpg->tport = NULL;
+		tport->tpg = NULL;
+	}
+	kfree_rcu(tport, rcu);
+}
+
+/*
+ * Add local port.
+ * Called thru fc_lport_iterate().
+ */
+void ft_lport_add(struct fc_lport *lport, void *arg)
+{
+	mutex_lock(&ft_lport_lock);
+	ft_tport_create(lport);
+	mutex_unlock(&ft_lport_lock);
+}
+
+/*
+ * Delete local port.
+ * Called thru fc_lport_iterate().
+ */
+void ft_lport_del(struct fc_lport *lport, void *arg)
+{
+	struct ft_tport *tport;
+
+	mutex_lock(&ft_lport_lock);
+	tport = lport->prov[FC_TYPE_FCP];
+	if (tport)
+		ft_tport_delete(tport);
+	mutex_unlock(&ft_lport_lock);
+}
+
+/*
+ * Notification of local port change from libfc.
+ * Create or delete local port and associated tport.
+ */
+int ft_lport_notify(struct notifier_block *nb, unsigned long event, void *arg)
+{
+	struct fc_lport *lport = arg;
+
+	switch (event) {
+	case FC_LPORT_EV_ADD:
+		ft_lport_add(lport, NULL);
+		break;
+	case FC_LPORT_EV_DEL:
+		ft_lport_del(lport, NULL);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+/*
+ * Hash function for FC_IDs.
+ */
+static u32 ft_sess_hash(u32 port_id)
+{
+	return hash_32(port_id, FT_SESS_HASH_BITS);
+}
+
+/*
+ * Find session in local port.
+ * Sessions and hash lists are RCU-protected.
+ * A reference is taken which must be eventually freed.
+ */
+static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
+{
+	struct ft_tport *tport;
+	struct hlist_head *head;
+	struct hlist_node *pos;
+	struct ft_sess *sess;
+
+	rcu_read_lock();
+	tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+	if (!tport)
+		goto out;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
+	hlist_for_each_entry_rcu(sess, pos, head, hash) {
+		if (sess->port_id == port_id) {
+			kref_get(&sess->kref);
+			rcu_read_unlock();
+			pr_debug("port_id %x found %p\n", port_id, sess);
+			return sess;
+		}
+	}
+out:
+	rcu_read_unlock();
+	pr_debug("port_id %x not found\n", port_id);
+	return NULL;
+}
+
+/*
+ * Allocate session and enter it in the hash for the local port.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
+				      struct ft_node_acl *acl)
+{
+	struct ft_sess *sess;
+	struct hlist_head *head;
+	struct hlist_node *pos;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
+	hlist_for_each_entry_rcu(sess, pos, head, hash)
+		if (sess->port_id == port_id)
+			return sess;
+
+	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+	if (!sess)
+		return NULL;
+
+	sess->se_sess = transport_init_session();
+	if (IS_ERR(sess->se_sess)) {
+		kfree(sess);
+		return NULL;
+	}
+	sess->se_sess->se_node_acl = &acl->se_node_acl;
+	sess->tport = tport;
+	sess->port_id = port_id;
+	kref_init(&sess->kref);	/* ref for table entry */
+	hlist_add_head_rcu(&sess->hash, head);
+	tport->sess_count++;
+
+	pr_debug("port_id %x sess %p\n", port_id, sess);
+
+	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
+				   sess->se_sess, sess);
+	return sess;
+}
+
+/*
+ * Unhash the session.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_sess_unhash(struct ft_sess *sess)
+{
+	struct ft_tport *tport = sess->tport;
+
+	hlist_del_rcu(&sess->hash);
+	BUG_ON(!tport->sess_count);
+	tport->sess_count--;
+	sess->port_id = -1;
+	sess->params = 0;
+}
+
+/*
+ * Delete session from hash.
+ * Caller holds ft_lport_lock.
+ */
+static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
+{
+	struct hlist_head *head;
+	struct hlist_node *pos;
+	struct ft_sess *sess;
+
+	head = &tport->hash[ft_sess_hash(port_id)];
+	hlist_for_each_entry_rcu(sess, pos, head, hash) {
+		if (sess->port_id == port_id) {
+			ft_sess_unhash(sess);
+			return sess;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Delete all sessions from tport.
+ * Caller holds ft_lport_lock.
+ */
+static void ft_sess_delete_all(struct ft_tport *tport)
+{
+	struct hlist_head *head;
+	struct hlist_node *pos;
+	struct ft_sess *sess;
+
+	for (head = tport->hash;
+	     head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
+		hlist_for_each_entry_rcu(sess, pos, head, hash) {
+			ft_sess_unhash(sess);
+			transport_deregister_session_configfs(sess->se_sess);
+			ft_sess_put(sess);	/* release from table */
+		}
+	}
+}
+
+/*
+ * TCM ops for sessions.
+ */
+
+/*
+ * Determine whether the session is allowed to be shut down in the current
+ * context.  Returns non-zero if the session should be shut down.
+ */
+int ft_sess_shutdown(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	pr_debug("port_id %x\n", sess->port_id);
+	return 1;
+}
+
+/*
+ * Remove session and send PRLO.
+ * This is called when the ACL is being deleted or queue depth is changing.
+ */
+void ft_sess_close(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+	u32 port_id;
+
+	mutex_lock(&ft_lport_lock);
+	port_id = sess->port_id;
+	if (port_id == -1) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	pr_debug("port_id %x\n", port_id);
+	ft_sess_unhash(sess);
+	mutex_unlock(&ft_lport_lock);
+	transport_deregister_session_configfs(se_sess);
+	ft_sess_put(sess);
+	/* XXX Send LOGO or PRLO */
+	synchronize_rcu();		/* let transport deregister happen */
+}
+
+u32 ft_sess_get_index(struct se_session *se_sess)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	return sess->port_id;	/* XXX TBD probably not what is needed */
+}
+
+u32 ft_sess_get_port_name(struct se_session *se_sess,
+			  unsigned char *buf, u32 len)
+{
+	struct ft_sess *sess = se_sess->fabric_sess_ptr;
+
+	return ft_format_wwn(buf, len, sess->port_name);
+}
+
+/*
+ * libfc ops involving sessions.
+ */
+
+static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+			  const struct fc_els_spp *rspp, struct fc_els_spp *spp)
+{
+	struct ft_tport *tport;
+	struct ft_sess *sess;
+	struct ft_node_acl *acl;
+	u32 fcp_parm;
+
+	tport = ft_tport_create(rdata->local_port);
+	if (!tport)
+		goto not_target;	/* not a target for this local port */
+
+	acl = ft_acl_get(tport->tpg, rdata);
+	if (!acl)
+		goto not_target;	/* no target for this remote */
+
+	if (!rspp)
+		goto fill;
+
+	if (rspp->spp_flags & (FC_SPP_OPA_VAL | FC_SPP_RPA_VAL))
+		return FC_SPP_RESP_NO_PA;
+
+	/*
+	 * If both target and initiator bits are off, the SPP is invalid.
+	 */
+	fcp_parm = ntohl(rspp->spp_params);
+	if (!(fcp_parm & (FCP_SPPF_INIT_FCN | FCP_SPPF_TARG_FCN)))
+		return FC_SPP_RESP_INVL;
+
+	/*
+	 * Create session (image pair) only if requested by
+	 * EST_IMG_PAIR flag and if the requestor is an initiator.
+	 */
+	if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) {
+		spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
+		if (!(fcp_parm & FCP_SPPF_INIT_FCN))
+			return FC_SPP_RESP_CONF;
+		sess = ft_sess_create(tport, rdata->ids.port_id, acl);
+		if (!sess)
+			return FC_SPP_RESP_RES;
+		if (!sess->params)
+			rdata->prli_count++;
+		sess->params = fcp_parm;
+		sess->port_name = rdata->ids.port_name;
+		sess->max_frame = rdata->maxframe_size;
+
+		/* XXX TBD - clearing actions.  unit attn, see 4.10 */
+	}
+
+	/*
+	 * OR in our service parameters with other provider (initiator), if any.
+	 */
+fill:
+	fcp_parm = ntohl(spp->spp_params);
+	fcp_parm &= ~FCP_SPPF_RETRY;
+	spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+	return FC_SPP_RESP_ACK;
+
+not_target:
+	fcp_parm = ntohl(spp->spp_params);
+	fcp_parm &= ~FCP_SPPF_TARG_FCN;
+	spp->spp_params = htonl(fcp_parm);
+	return 0;
+}
+
+/**
+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page (NULL for outgoing PRLI)
+ * @spp: response service parameter page
+ *
+ * Returns spp response code.
+ */
+static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
+		   const struct fc_els_spp *rspp, struct fc_els_spp *spp)
+{
+	int ret;
+
+	mutex_lock(&ft_lport_lock);
+	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
+	mutex_unlock(&ft_lport_lock);
+	pr_debug("port_id %x flags %x ret %x\n",
+	       rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+	return ret;
+}
+
+static void ft_sess_rcu_free(struct rcu_head *rcu)
+{
+	struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
+
+	kfree(sess);
+}
+
+static void ft_sess_free(struct kref *kref)
+{
+	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+
+	transport_deregister_session(sess->se_sess);
+	call_rcu(&sess->rcu, ft_sess_rcu_free);
+}
+
+void ft_sess_put(struct ft_sess *sess)
+{
+	int sess_held = atomic_read(&sess->kref.refcount);
+
+	BUG_ON(!sess_held);
+	kref_put(&sess->kref, ft_sess_free);
+}
+
+static void ft_prlo(struct fc_rport_priv *rdata)
+{
+	struct ft_sess *sess;
+	struct ft_tport *tport;
+
+	mutex_lock(&ft_lport_lock);
+	tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
+	if (!tport) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	sess = ft_sess_delete(tport, rdata->ids.port_id);
+	if (!sess) {
+		mutex_unlock(&ft_lport_lock);
+		return;
+	}
+	mutex_unlock(&ft_lport_lock);
+	transport_deregister_session_configfs(sess->se_sess);
+	ft_sess_put(sess);		/* release from table */
+	rdata->prli_count--;
+	/* XXX TBD - clearing actions.  unit attn, see 4.10 */
+}
+
+/*
+ * Handle incoming FCP request.
+ * Caller has verified that the frame is type FCP.
+ */
+static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct ft_sess *sess;
+	u32 sid = fc_frame_sid(fp);
+
+	pr_debug("sid %x\n", sid);
+
+	sess = ft_sess_get(lport, sid);
+	if (!sess) {
+		pr_debug("sid %x sess lookup failed\n", sid);
+		/* TBD XXX - if FCP_CMND, send PRLO */
+		fc_frame_free(fp);
+		return;
+	}
+	ft_recv_req(sess, fp);	/* must do ft_sess_put() */
+}
+
+/*
+ * Provider ops for libfc.
+ */
+struct fc4_prov ft_prov = {
+	.prli = ft_prli,
+	.prlo = ft_prlo,
+	.recv = ft_recv,
+	.module = THIS_MODULE,
+};
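ft_sess_get() and ft_sess_create() above pick a bucket with hash_32(port_id, FT_SESS_HASH_BITS) and then walk that hlist under RCU. A standalone sketch of the bucket selection only (the multiplier mirrors the 32-bit golden-ratio constant of this kernel generation and should be treated as an assumption; the driver itself simply calls hash_32()):

#include <stdio.h>

#define FT_SESS_HASH_BITS	6
#define FT_SESS_HASH_SIZE	(1 << FT_SESS_HASH_BITS)

/* 32-bit multiplicative hash; the constant is assumed from this kernel era. */
static unsigned int hash_32(unsigned int val, unsigned int bits)
{
	return (val * 0x9e370001U) >> (32 - bits);
}

int main(void)
{
	unsigned int port_id = 0x010203;	/* example remote FC_ID */

	printf("FC_ID %06x -> bucket %u of %d\n",
	       port_id, hash_32(port_id, FT_SESS_HASH_BITS),
	       FT_SESS_HASH_SIZE);
	return 0;
}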